PoTaTo721 committed
Commit d879c3f
Parent: ef93c66

remove streaming

Files changed (1):
  1. app.py (+4 -12)
app.py CHANGED
@@ -95,13 +95,12 @@ def inference(
     top_p,
     repetition_penalty,
     temperature,
-    streaming=False,
 ):
     if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
         return (
             None,
             None,
-            "Text is too long, please keep it under {} characters.".format(
+            i18n("Text is too long, please keep it under {} characters.").format(
                 args.max_gradio_length
             ),
         )
@@ -137,16 +136,12 @@ def inference(
         )
     )
 
-    if streaming:
-        yield wav_chunk_header(), None, None
-
     segments = []
 
     while True:
         result: WrappedGenerateResponse = response_queue.get()
         if result.status == "error":
-            yield None, None, build_html_error_message(result.response)
-            break
+            return None, None, build_html_error_message(result.response)
 
         result: GenerateResponse = result.response
         if result.action == "next":
@@ -168,9 +163,6 @@ def inference(
         fake_audios = fake_audios.float().cpu().numpy()
         segments.append(fake_audios)
 
-        if streaming:
-            yield (fake_audios * 32768).astype(np.int16).tobytes(), None, None
-
     if len(segments) == 0:
         return (
             None,
@@ -180,9 +172,9 @@ def inference(
             ),
         )
 
-    # No matter streaming or not, we need to return the final audio
+    # Return the final audio
    audio = np.concatenate(segments, axis=0)
-    yield None, (decoder_model.spec_transform.sample_rate, audio), None
+    return None, (decoder_model.spec_transform.sample_rate, audio), None
 
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
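In effect, this commit turns inference from a Gradio streaming generator into a plain function. Any Python def that contains a yield is a generator function, and Gradio streams output from generator handlers, so removing streaming cleanly means removing every yield: the error branch collapses from yield-then-break to a single return, and the final audio is now returned rather than yielded. A minimal sketch of the before/after shapes, assuming an invented synthesize_chunks() stand-in for the decoder loop and an invented 44100 Hz rate in place of decoder_model.spec_transform.sample_rate:

import numpy as np

def synthesize_chunks():
    # Invented stand-in for the decoder loop in app.py.
    for _ in range(3):
        yield np.zeros(44100, dtype=np.float32)  # 1 s of silence per chunk

# Before this commit: a generator, so Gradio streams partial results.
def inference_streaming():
    segments = []
    for chunk in synthesize_chunks():
        segments.append(chunk)
        # Raw int16 bytes for a streaming audio output.
        yield (chunk * 32768).astype(np.int16).tobytes(), None, None
    audio = np.concatenate(segments, axis=0)
    yield None, (44100, audio), None

# After this commit: an ordinary function with one final result.
def inference_batch():
    segments = list(synthesize_chunks())
    audio = np.concatenate(segments, axis=0)
    return None, (44100, audio), None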
 
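The other change in the first hunk wraps the length-limit error message with i18n(...) before calling .format(...). Translating first and formatting second keeps the untranslated template, placeholder included, as the stable lookup key, so the character limit is interpolated into whatever localized string comes back. A rough sketch of that pattern, assuming a dictionary-backed i18n helper; the real helper is defined or imported elsewhere in app.py, and this translation table is invented:

# Invented translation table; the real i18n helper ships with the project.
TRANSLATIONS = {
    "Text is too long, please keep it under {} characters.":
        "文本过长，请将其保持在 {} 个字符以内。",
}

def i18n(key: str) -> str:
    # Fall back to the English template when no translation exists.
    return TRANSLATIONS.get(key, key)

# Format after translating, so the "{}" placeholder survives the lookup.
print(i18n("Text is too long, please keep it under {} characters.").format(500))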