mrfakename committed on
Commit
8468a28
1 Parent(s): 0978fba

Sync from GitHub repo


This Space is synced from the GitHub repo: https://github.com/SWivid/F5-TTS. Please submit contributions to the Space via the GitHub repo.

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -380,7 +380,9 @@ with gr.Blocks() as app_multistyle:
             ref_text = speech_types[current_style].get("ref_text", "")
 
             # Generate speech for this segment
-            audio, _ = infer(ref_audio, ref_text, text, model_choice, remove_silence, 0, show_info=None)
+            audio, _ = infer(
+                ref_audio, ref_text, text, model_choice, remove_silence, 0, show_info=print
+            )  # show_info=print so the page does not jump to the top while generating
             sr, audio_data = audio
 
             generated_audio_segments.append(audio_data)
@@ -482,12 +484,9 @@ Have a conversation with an AI using your reference voice!
     chat_interface_container = gr.Column()
 
     if chat_model_state is None:
-        show_info = gr.Info
-        show_info("Loading chat model...")
         model_name = "Qwen/Qwen2.5-3B-Instruct"
         chat_model_state = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
         chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name)
-        show_info("Chat model loaded.")
 
     with chat_interface_container:
         with gr.Row():
@@ -578,7 +577,7 @@ Have a conversation with an AI using your reference voice!
                 remove_silence,
                 cross_fade_duration=0.15,
                 speed=1.0,
-                show_info=None,
+                show_info=print,  # show_info=print so the page does not jump to the top while generating
             )
             return audio_result
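
For context, the change hinges on infer() taking a show_info callable for progress messages: the removed code passed gr.Info (which pops Gradio toast notifications and, per the commit comment, can pull the page back to the top on each segment), while print only writes to the server console. Below is a minimal sketch of that callback pattern; infer_sketch and its simplified signature are hypothetical stand-ins, not the real infer() from app.py.

import gradio as gr  # assumes a Gradio environment, as in the Space

def infer_sketch(text, show_info=gr.Info):
    """Hypothetical, simplified stand-in for app.py's infer()."""
    # show_info is any callable that accepts a message string:
    # - gr.Info shows a toast in the Gradio UI (and may scroll the page)
    # - print only logs to the server console, so the page stays put
    show_info(f"Generating audio for: {text!r}")
    # ... actual synthesis would happen here ...
    show_info("Done.")
    return text  # placeholder result

# Per-segment generation, as in the multistyle loop above, passes print
# so repeated notifications do not pull the page to the top:
for segment in ["Hello there.", "General Kenobi!"]:
    infer_sketch(segment, show_info=print)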