sujeshpadhi committed on
Commit
db58511
1 Parent(s): 9e97375

uploaded config and gui files

Browse files
Files changed (3) hide show
  1. __init__.py +0 -0
  2. config.json +39 -0
  3. gui.py +47 -0
__init__.py ADDED
File without changes
config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "device": "cuda",
3
+
4
+ "dataset": {
5
+ "ids": [2],
6
+ "cache_dir": "../dataset",
7
+ "batch_size": 16,
8
+ "num_workers": 8
9
+ },
10
+
11
+ "model": {
12
+ "freeze_till": -1
13
+ },
14
+
15
+ "optim_args": {
16
+ "lr": 1e-4
17
+ },
18
+
19
+ "trainer": {
20
+ "limit_train_batches": 0.25,
21
+ "max_epochs": 1,
22
+ "deterministic": false,
23
+ "log_every_n_steps": 2048,
24
+ "accelerator": "gpu",
25
+ "check_val_every_n_epoch": 1,
26
+ "precision": "16-mixed",
27
+ "enable_progress_bar": true,
28
+ "default_root_dir": "./logs",
29
+ "enable_checkpointing": true,
30
+ "benchmark": true,
31
+ "max_time": null
32
+ },
33
+
34
+ "weight": "./saved/models/T5-v4.pth",
35
+
36
+ "fit": {
37
+ "ckpt_path": null
38
+ }
39
+ }
gui.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#installing the gradio transformer
#!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch

import gradio as gr
#from transformers import AutoModelForSeq2SeqLM, pipeline
import torch
import json
from transformers import T5ForConditionalGeneration
from model.T5 import T5

# Load the run configuration FIRST so that the device and the checkpoint
# path both come from config.json instead of being hard-coded below.
with open("config.json", "r") as f:
    config = json.load(f)

# Device is driven by the config ("device": "cuda" in config.json) rather
# than a hard-coded 'cuda' string.
device = config["device"]

# this model was loaded from https://hf.co/models
model = T5('t5-small').to(device)

# Languages offered in the GUI dropdowns; must match what the fine-tuned
# checkpoint was trained on.
LANGS = ["English", "German", "Italian", "Dutch", "Romanian", "French"]

# Load the weights. map_location makes the checkpoint loadable even when it
# was saved on a different device than the one we are running on.
model.load_state_dict(torch.load(config["weight"], map_location=device))
21
def translate(text, src_lang, tgt_lang):
    """
    Translate `text` from `src_lang` to `tgt_lang`.

    Builds a T5-style task prefix ("translate <src> to <tgt>: <text>") and
    runs the model under inference mode (no autograd bookkeeping).

    Returns the translated string for the single input prompt.
    """
    # T5 expects the task encoded as a natural-language prefix on the input.
    prompt = f"translate {src_lang} to {tgt_lang}: {text}"

    with torch.inference_mode():
        # model.predict takes a batch of prompts; we send a batch of one.
        outputs = model.predict([prompt])

    # Return the single result from the one-element batch.
    return outputs[0]
33
# Build the translation web UI: one free-text box plus source/target
# language pickers, wired to translate() above.
text_box = gr.components.Textbox(label="Text")
source_pick = gr.components.Dropdown(label="Source Language", choices=LANGS)
target_pick = gr.components.Dropdown(label="Target Language", choices=LANGS)

demo = gr.Interface(
    fn=translate,
    inputs=[text_box, source_pick, target_pick],
    outputs=["text"],
    #examples=[["Building a translation demo with Gradio is so easy!", "eng_Latn", "spa_Latn"]],
    cache_examples=False,
    title="Language Translator",
    description="This is a GUI for the Language Translation System",
)

# share=True exposes a temporary public Gradio link in addition to the
# local server.
demo.launch(share=True)