goldbach7 committed on
Commit
3d6a05a
1 Parent(s): b988a51

Update modeling_orion.py

Browse files

change to is_flash_attn_2_available

Files changed (1) hide show
  1. modeling_orion.py +2 -2
modeling_orion.py CHANGED
@@ -25,12 +25,12 @@ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
25
  from transformers.utils import (
26
  add_start_docstrings,
27
  add_start_docstrings_to_model_forward,
28
- is_flash_attn_available,
29
  logging,
30
  replace_return_docstrings,
31
  )
32
 
33
- if is_flash_attn_available():
34
  from flash_attn import flash_attn_func, flash_attn_varlen_func
35
  from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
36
 
 
25
  from transformers.utils import (
26
  add_start_docstrings,
27
  add_start_docstrings_to_model_forward,
28
+ is_flash_attn_2_available,
29
  logging,
30
  replace_return_docstrings,
31
  )
32
 
33
+ if is_flash_attn_2_available():
34
  from flash_attn import flash_attn_func, flash_attn_varlen_func
35
  from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
36