Ilzhabimantara committed
Commit a477d7b
1 parent: 8d530f8

Update config.py

Files changed (1): config.py (+9, -27)
config.py CHANGED
@@ -11,42 +11,24 @@ class Config:
         self.gpu_name = None
         self.gpu_mem = None
         (
-            self.python_cmd,
-            self.listen_port,
             self.colab,
-            self.noparallel,
-            self.noautoopen,
-            self.api
+            self.api,
+            self.unsupported
         ) = self.arg_parse()
         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
 
     @staticmethod
     def arg_parse() -> tuple:
-        exe = sys.executable or "python"
         parser = argparse.ArgumentParser()
-        parser.add_argument("--port", type=int, default=7865, help="Listen port")
-        parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
         parser.add_argument("--colab", action="store_true", help="Launch in colab")
-        parser.add_argument(
-            "--noparallel", action="store_true", help="Disable parallel processing"
-        )
-        parser.add_argument(
-            "--noautoopen",
-            action="store_true",
-            help="Do not open in browser automatically",
-        )
         parser.add_argument("--api", action="store_true", help="Launch with api")
+        parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature")
         cmd_opts = parser.parse_args()
 
-        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
-
         return (
-            cmd_opts.pycmd,
-            cmd_opts.port,
             cmd_opts.colab,
-            cmd_opts.noparallel,
-            cmd_opts.noautoopen,
-            cmd_opts.api
+            cmd_opts.api,
+            cmd_opts.unsupported
         )
 
     # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+.
@@ -72,10 +54,10 @@ class Config:
                 or "1070" in self.gpu_name
                 or "1080" in self.gpu_name
             ):
-                print("Found GPU", self.gpu_name, ", force to fp32")
+                print("INFO: Found GPU", self.gpu_name, ", force to fp32")
                 self.is_half = False
             else:
-                print("Found GPU", self.gpu_name)
+                print("INFO: Found GPU", self.gpu_name)
             self.gpu_mem = int(
                 torch.cuda.get_device_properties(i_device).total_memory
                 / 1024
@@ -84,11 +66,11 @@ class Config:
                 + 0.4
             )
         elif self.has_mps():
-            print("No supported Nvidia GPU found, use MPS instead")
+            print("INFO: No supported Nvidia GPU found, use MPS instead")
             self.device = "mps"
             self.is_half = False
         else:
-            print("No supported Nvidia GPU found, use CPU instead")
+            print("INFO: No supported Nvidia GPU found, use CPU instead")
             self.device = "cpu"
             self.is_half = False
 
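The net effect of the first hunk is a much smaller flag surface. The sketch below is not part of the repository: it is a minimal, self-contained reconstruction of the post-commit argument parsing, with flag names and help strings taken verbatim from the diff; the standalone wrapper name parse_flags is only illustrative.

import argparse

# Minimal sketch of the flag surface after this commit: only --colab, --api
# and --unsupported remain; --port, --pycmd, --noparallel, --noautoopen and
# the port-range check were removed.  parse_flags is a hypothetical wrapper,
# not a function that exists in config.py.
def parse_flags(argv=None) -> tuple:
    parser = argparse.ArgumentParser()
    parser.add_argument("--colab", action="store_true", help="Launch in colab")
    parser.add_argument("--api", action="store_true", help="Launch with api")
    parser.add_argument(
        "--unsupported", action="store_true", help="Enable unsupported feature"
    )
    cmd_opts = parser.parse_args(argv)
    return (cmd_opts.colab, cmd_opts.api, cmd_opts.unsupported)

if __name__ == "__main__":
    # e.g. parse_flags(["--colab", "--api"]) -> (True, True, False)
    print(parse_flags())

One consequence worth noting: code that previously read self.python_cmd or self.listen_port from Config must now obtain those values elsewhere, since --pycmd and --port are no longer parsed.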
 
 
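The remaining two hunks only reword the log lines with an "INFO:" prefix; the device-selection logic itself is unchanged. As a rough, hedged sketch of what device_config() decides: only the GPU-name checks visible in the hunk ("1070", "1080") are reproduced, the real method tests more card families, and pick_device is an illustrative name rather than the repository's API.

import torch

def pick_device() -> tuple:
    """Illustrative sketch of the device/precision choice made by device_config()."""
    if torch.cuda.is_available():
        i_device = 0
        gpu_name = torch.cuda.get_device_name(i_device)
        gpu_mem = int(  # total VRAM in whole GiB, rounded as in config.py
            torch.cuda.get_device_properties(i_device).total_memory
            / 1024 / 1024 / 1024
            + 0.4
        )
        if "1070" in gpu_name or "1080" in gpu_name:
            # 10-series cards have poor fp16 throughput, so half precision is disabled.
            print("INFO: Found GPU", gpu_name, ", force to fp32")
            return "cuda:0", False, gpu_mem
        print("INFO: Found GPU", gpu_name)
        return "cuda:0", True, gpu_mem  # other cards keep half precision (assumed default)
    # config.py guards this with has_mps(); torch.backends.mps needs a recent
    # PyTorch, hence the getattr check in this sketch.
    if getattr(torch.backends, "mps", None) and torch.backends.mps.is_available():
        print("INFO: No supported Nvidia GPU found, use MPS instead")
        return "mps", False, None
    print("INFO: No supported Nvidia GPU found, use CPU instead")
    return "cpu", False, None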