Omnibus committed on
Commit
e5cc640
β€’
0 Parent(s):

Duplicate from Omnibus/Translate-100

Browse files
Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +154 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Translate 100
3
+ emoji: >-
4
+ πŸ‘­πŸ‘©πŸ»β€πŸ€β€πŸ‘©πŸ»πŸ‘©πŸΌβ€πŸ€β€πŸ‘©πŸ»πŸ‘©πŸΌβ€πŸ€β€πŸ‘©πŸΌπŸ‘©πŸ½β€πŸ€β€πŸ‘©πŸ»πŸ‘©πŸ½β€πŸ€β€πŸ‘©πŸΌπŸ‘©πŸ½β€πŸ€β€πŸ‘©πŸ½πŸ‘©πŸΎβ€πŸ€β€πŸ‘©πŸ»πŸ‘©πŸΎβ€πŸ€β€πŸ‘©πŸΌπŸ‘©πŸΎβ€πŸ€β€πŸ‘©πŸ½πŸ‘«πŸ‘©πŸ»β€πŸ€β€πŸ§‘πŸ»πŸ‘©πŸΌβ€πŸ€β€πŸ§‘πŸΎπŸ‘©πŸΌβ€πŸ€β€πŸ§‘πŸΌπŸ‘©πŸ»β€πŸ€β€πŸ§‘πŸ½πŸ‘©πŸ»β€πŸ€β€πŸ§‘πŸΎπŸ‘©πŸΏβ€πŸ€β€πŸ‘©πŸ½
5
+ colorFrom: gray
6
+ colorTo: blue
7
+ sdk: gradio
8
+ sdk_version: 3.17.0
9
+ app_file: app.py
10
+ pinned: true
11
+ duplicated_from: Omnibus/Translate-100
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Translate-100: Gradio front-end over Facebook's M2M100 multilingual model."""

import os

import torch
import gradio as gr  # NOTE: was imported twice in the original; once is enough
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration

# Run inference on the first GPU when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")

# M2M100 (1.2B) is a many-to-many translation model covering ~100 languages.
# Loading downloads several GB of weights on first run.
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_1.2B")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_1.2B").to(device)
model.eval()  # inference only — disable dropout etc.

# Display name -> M2M100 language code. The empty entry gives the UI
# dropdowns a blank choice; several display names intentionally share one
# code (e.g. the Chinese and Serbian variants).
lang_id = {
    "": "",
    "Afrikaans": "af",
    "Albanian": "sq",
    "Amharic": "am",
    "Arabic": "ar",
    "Armenian": "hy",
    "Asturian": "ast",
    "Azerbaijani": "az",
    "Bashkir": "ba",
    "Belarusian": "be",
    "Bulgarian": "bg",
    "Bengali": "bn",
    "Breton": "br",
    "Bosnian": "bs",
    "Burmese": "my",
    "Catalan": "ca",
    "Cebuano": "ceb",
    "Chinese": "zh",
    "Chinese (simplified)": "zh",
    "Chinese (traditional)": "zh",
    "Croatian": "hr",
    "Czech": "cs",
    "Danish": "da",
    "Dutch": "nl",
    "English": "en",
    "Estonian": "et",
    "Fulah": "ff",
    "Finnish": "fi",
    "French": "fr",
    "Western Frisian": "fy",
    "Gaelic": "gd",
    "Galician": "gl",
    "Georgian": "ka",
    "German": "de",
    "Greek": "el",
    "Gujarati": "gu",
    "Hausa": "ha",
    "Hebrew": "he",
    "Hindi": "hi",
    "Haitian": "ht",
    "Hungarian": "hu",
    "Irish": "ga",
    "Indonesian": "id",
    "Igbo": "ig",
    "Iloko": "ilo",
    "Icelandic": "is",
    "Italian": "it",
    "Japanese": "ja",
    "Javanese": "jv",
    "Kazakh": "kk",
    "Central Khmer": "km",
    "Kannada": "kn",
    "Korean": "ko",
    "Luxembourgish": "lb",
    "Ganda": "lg",
    "Lingala": "ln",
    "Lao": "lo",
    "Lithuanian": "lt",
    "Latvian": "lv",
    "Malagasy": "mg",
    "Macedonian": "mk",
    "Malayalam": "ml",
    "Mongolian": "mn",
    "Marathi": "mr",
    "Malay": "ms",
    "Nepali": "ne",
    "Norwegian": "no",
    "Northern Sotho": "ns",
    "Occitan": "oc",
    "Oriya": "or",
    "Panjabi": "pa",
    "Persian": "fa",
    "Polish": "pl",
    "Pushto": "ps",
    "Portuguese": "pt",
    "Romanian": "ro",
    "Russian": "ru",
    "Sindhi": "sd",
    "Sinhala": "si",
    "Slovak": "sk",
    "Slovenian": "sl",
    "Spanish": "es",
    "Somali": "so",
    "Serbian": "sr",
    "Serbian (cyrillic)": "sr",
    "Serbian (latin)": "sr",
    "Swati": "ss",
    "Sundanese": "su",
    "Swedish": "sv",
    "Swahili": "sw",
    "Tamil": "ta",
    "Thai": "th",
    "Tagalog": "tl",
    "Tswana": "tn",
    "Turkish": "tr",
    "Ukrainian": "uk",
    "Urdu": "ur",
    "Uzbek": "uz",
    "Vietnamese": "vi",
    "Welsh": "cy",
    "Wolof": "wo",
    "Xhosa": "xh",
    "Yiddish": "yi",
    "Yoruba": "yo",
    "Zulu": "zu",
}
123
+
124
def trans_to(input, src, trg):
    """Translate *input* text from language *src* to language *trg* via M2M100.

    Args:
        input: Text to translate. (Parameter name shadows the builtin
            ``input``; kept unchanged for compatibility with existing wiring.)
        src: Display name of the source language — a key of ``lang_id``.
        trg: Display name of the target language — a key of ``lang_id``.

    Returns:
        The translated string, or the original text unchanged when source
        and target are the same language or either selection is blank.
    """
    src_lang = lang_id[src]
    trg_lang = lang_id[trg]
    # Guard: the blank dropdown choice maps to "" — translating to/from it
    # would raise inside get_lang_id, so pass the text through untouched.
    # Same-language requests are also a no-op.
    if not src_lang or not trg_lang or trg_lang == src_lang:
        return input
    tokenizer.src_lang = src_lang
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        encoded_input = tokenizer(input, return_tensors="pt").to(device)
        generated_tokens = model.generate(
            **encoded_input,
            forced_bos_token_id=tokenizer.get_lang_id(trg_lang),
        )
        translated_text = tokenizer.batch_decode(
            generated_tokens, skip_special_tokens=True
        )[0]
    return translated_text
137
+
138
# UI layout: a centered title row, a row of language pickers plus the Go
# button, and a row with the input/output textboxes side by side.
with gr.Blocks() as transbot:
    with gr.Row():
        gr.Column()  # empty spacer column — centers the title
        with gr.Column():
            gr.Markdown("""<h1><center>Translate - 100 Languages</center></h1><h4><center>Translation may not be accurate</center></h4>""")
    with gr.Row():
        # Dropdown choices come straight from the lang_id table, so the UI
        # stays in sync with the languages the model call supports.
        lang_from = gr.Dropdown(label="From:", choices=list(lang_id.keys()),value="English")
        lang_to = gr.Dropdown(label="To:", choices=list(lang_id.keys()),value="Chinese")
        submit = gr.Button("Go")
    with gr.Row():
        with gr.Column():
            message = gr.Textbox(label="Prompt",placeholder="Enter Prompt",lines=4)
            translated = gr.Textbox(label="Translated",lines=4,interactive=False)
        gr.Column()  # empty spacer column
    # Wire the button to the translation function: (text, from, to) -> text.
    submit.click(trans_to, inputs=[message,lang_from,lang_to], outputs=[translated])
# Queue requests so concurrent users share the single loaded model
# (concurrency_count is the gradio 3.x queue API).
transbot.queue(concurrency_count=20)
transbot.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ transformers
3
+ transformers[sentencepiece]