csukuangfj committed
Commit: a1e6695
Parent(s): 6e5faa0

add more models

model.py CHANGED
@@ -244,6 +244,10 @@ def get_pretrained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
         return korean_models[repo_id](repo_id)
     elif repo_id in thai_models:
         return thai_models[repo_id](repo_id)
+    elif repo_id in japanese_models:
+        return japanese_models[repo_id](repo_id)
+    elif repo_id in zh_en_ko_ja_yue_models:
+        return zh_en_ko_ja_yue_models[repo_id](repo_id)
     else:
         raise ValueError(f"Unsupported repo_id: {repo_id}")
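The new elif branches reuse the existing table-dispatch pattern: each repo_id maps to a factory that is called with the repo_id alone. A minimal sketch of exercising the two new branches, assuming this Space's model.py is on the import path and the listed repos are downloadable:

# Sketch only: assumes model.py from this Space is importable.
from model import get_pretrained_model

# Dispatched through japanese_models to _get_japanese_pre_trained_model.
ja = get_pretrained_model("reazon-research/reazonspeech-k2-v2")

# Dispatched through zh_en_ko_ja_yue_models to _get_sense_voice_pre_trained_model.
sv = get_pretrained_model(
    "csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17"
)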
@@ -401,6 +405,43 @@ def _get_korean_pre_trained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
     return recognizer
 
 
+@lru_cache(maxsize=10)
+def _get_japanese_pre_trained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
+    assert repo_id in ("reazon-research/reazonspeech-k2-v2",), repo_id
+
+    encoder_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="encoder-epoch-99-avg-1.int8.onnx",
+        subfolder=".",
+    )
+
+    decoder_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="decoder-epoch-99-avg-1.onnx",
+        subfolder=".",
+    )
+
+    joiner_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="joiner-epoch-99-avg-1.onnx",
+        subfolder=".",
+    )
+
+    tokens = _get_token_filename(repo_id=repo_id, subfolder=".")
+
+    recognizer = sherpa_onnx.OfflineRecognizer.from_transducer(
+        tokens=tokens,
+        encoder=encoder_model,
+        decoder=decoder_model,
+        joiner=joiner_model,
+        num_threads=2,
+        sample_rate=16000,
+        feature_dim=80,
+    )
+
+    return recognizer
+
+
 @lru_cache(maxsize=10)
 def _get_yifan_thai_pretrained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
     assert repo_id in (
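_get_japanese_pre_trained_model returns an ordinary sherpa_onnx.OfflineRecognizer, so transcription follows the standard offline-stream pattern. A minimal sketch, assuming a mono 16 kHz file test.wav (the filename is illustrative) and the soundfile package:

import soundfile as sf

from model import _get_japanese_pre_trained_model  # assumes model.py is importable

recognizer = _get_japanese_pre_trained_model("reazon-research/reazonspeech-k2-v2")

# test.wav is a placeholder; the model was exported for 16 kHz mono input.
samples, sr = sf.read("test.wav", dtype="float32")

stream = recognizer.create_stream()
stream.accept_waveform(sr, samples)
recognizer.decode_stream(stream)
print(stream.result.text)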
@@ -440,10 +481,46 @@ def _get_yifan_thai_pretrained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
     return recognizer
 
 
+@lru_cache(maxsize=10)
+def _get_sense_voice_pre_trained_model(
+    repo_id: str,
+    decoding_method: str = "greedy_search",
+    num_active_paths: int = 4,
+) -> sherpa_onnx.OfflineRecognizer:
+    assert repo_id in [
+        "csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17",
+    ], repo_id
+
+    nn_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="model.int8.onnx",
+        subfolder=".",
+    )
+
+    tokens = _get_token_filename(repo_id=repo_id, subfolder=".")
+
+    recognizer = sherpa_onnx.OfflineRecognizer.from_sense_voice(
+        model=nn_model,
+        tokens=tokens,
+        num_threads=2,
+        sample_rate=sample_rate,
+        feature_dim=80,
+        decoding_method="greedy_search",
+        debug=True,
+        use_itn=True,
+    )
+
+    return recognizer
+
+
 chinese_dialect_models = {
     "csukuangfj/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04": _get_chinese_dialect_models,
 }
 
+zh_en_ko_ja_yue_models = {
+    "csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17": _get_sense_voice_pre_trained_model,
+}
+
 chinese_models = {
     "csukuangfj/sherpa-onnx-paraformer-zh-2023-03-28": _get_paraformer_zh_pre_trained_model,
     "csukuangfj/sherpa-onnx-conformer-zh-stateless2-2023-05-23": _get_wenetspeech_pre_trained_model,  # noqa
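Unlike the transducer factories above, SenseVoice is a single-file model, hence one model.int8.onnx passed to from_sense_voice; use_itn=True requests inverse-text-normalized output. Decoding looks the same from the caller's side, and several clips can be decoded as one batch. A sketch under the same assumptions as above (16 kHz mono placeholder files zh.wav and en.wav):

import soundfile as sf

from model import _get_sense_voice_pre_trained_model  # assumes model.py is importable

recognizer = _get_sense_voice_pre_trained_model(
    "csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17"
)

streams = []
for wav in ("zh.wav", "en.wav"):  # placeholder file names
    samples, sr = sf.read(wav, dtype="float32")
    s = recognizer.create_stream()
    s.accept_waveform(sr, samples)
    streams.append(s)

recognizer.decode_streams(streams)  # decode all clips in one batch
for s in streams:
    print(s.result.text)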
@@ -477,12 +554,20 @@ thai_models = {
     "yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20": _get_yifan_thai_pretrained_model,
 }
 
+japanese_models = {
+    "reazon-research/reazonspeech-k2-v2": _get_japanese_pre_trained_model,
+}
+
 language_to_models = {
     "超多种中文方言": list(chinese_dialect_models.keys()),
     "Chinese+English": list(chinese_english_mixed_models.keys()),
+    "Chinese+English+Korean+Japanese+Cantonese(中英韩日粤语)": list(
+        zh_en_ko_ja_yue_models.keys()
+    ),
     "Chinese": list(chinese_models.keys()),
     "English": list(english_models.keys()),
     "Russian": list(russian_models.keys()),
     "Korean": list(korean_models.keys()),
     "Thai": list(thai_models.keys()),
+    "Japanese": list(japanese_models.keys()),
 }
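language_to_models is presumably what the Space's UI reads to populate its language and model pickers; the wiring below is an assumption for illustration, not part of this diff. A sketch using gradio:

import gradio as gr

from model import language_to_models  # assumes model.py is importable

language = gr.Dropdown(
    choices=list(language_to_models.keys()),
    label="Language",
)

def on_language_change(lang: str):
    # Swap the model choices whenever a different language is picked.
    models = language_to_models[lang]
    return gr.update(choices=models, value=models[0])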