Add support for Ascend NPU
#7
by statelesshz
opened
modeling_chatglm.py  +1 -1
modeling_chatglm.py CHANGED
@@ -26,7 +26,7 @@ from .configuration_chatglm import ChatGLMConfig
 
 # flags required to enable jit fusion kernels
 
-if sys.platform != 'darwin':
+if sys.platform != 'darwin' and torch.cuda.is_available():
     torch._C._jit_set_profiling_mode(False)
     torch._C._jit_set_profiling_executor(False)
     torch._C._jit_override_can_fuse_on_cpu(True)
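
With this change, the CUDA-specific JIT fusion flags are only set when a CUDA device is actually available, so importing the model no longer assumes CUDA on other backends such as Ascend NPU. Below is a minimal usage sketch under that assumption; the torch_npu adapter, the torch_npu.npu.is_available() check, and the repo id "THUDM/chatglm-6b" are assumptions for illustration and are not part of this diff.

import torch
import torch_npu  # Ascend PyTorch adapter; provides the "npu" device type (assumed installed)
from transformers import AutoModel, AutoTokenizer

# Repo id is illustrative; trust_remote_code loads the patched modeling_chatglm.py.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)

# Move to the NPU when present, otherwise stay on CPU; half precision only on the NPU.
if torch_npu.npu.is_available():
    model = model.half().to("npu:0")
device = next(model.parameters()).device

inputs = tokenizer("Hello", return_tensors="pt").to(device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Without the torch.cuda.is_available() guard, the torch._C JIT calls above would run on NPU-only machines where they serve no purpose; gating them keeps the CUDA fast path intact while letting the same file import cleanly elsewhere.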