# -*- coding: utf-8 -*-
"""translation practice.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE
"""
import requests
import gradio as gr
#from dotenv import load_dotenv
import os
from openai import OpenAI
import spacy
# Load environment variables from .env file
#load_dotenv()
# Access the env
HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')
# openai setup
client = OpenAI(
api_key=os.getenv('OPENAI_API_KEY')
)
# hugging face setup
#model_name = "mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf"
API_URL = f"https://api-inference.huggingface.co/models/"
#API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
example_Japanese = '''こんにちは！
みなさん、私たちのプレゼンテーションにワクワクしていますか？
おなかがペコペコではありませんか？
では、ニコニコして、聞いてください！
今日のプレゼンテーションのテーマはオノマトペのくりかえしことばです。
このようなことば、みなさん何か知っていますか？
日本語は、くりかえすことばを毎日つかいます。気持ちがわかりやすいひょうげんです。わかりやすくて、おぼえやすいです。たとえば、「ぴかぴか」と聞いたら、どうおもいますか？どんなイメージですか？やっぱり、きれいでんきのひかりですね。'''
example_English = '''Hello!
Are you all excited about our presentation?
Aren't you hungry?
So, smile and listen!
The theme of today's presentation is onomatopoeic repetition.
Do you know any such words?
In Japanese, we use repeated words every day. It's easy to understand, easy to understand, and easy to follow. For example, what do you think when you hear the word "pikapika"? What kind of image do you have? After all, it is a beautiful and electric light.'''
def split_sentences_ginza(input_text):
nlp = spacy.load("ja_core_news_sm")
doc = nlp(input_text)
sentences = [sent.text for sent in doc.sents]
return sentences
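# Note: despite the "ginza" name, this helper loads spaCy's ja_core_news_sm model
# (and reloads it on every call). Illustrative usage, assuming the model has been
# installed (e.g. `python -m spacy download ja_core_news_sm`):
#   split_sentences_ginza("こんにちは！元気ですか？")
#   -> ["こんにちは！", "元気ですか？"]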
def query_hf(payload, model_name):
# HTTP POST Request
response = requests.post(API_URL+model_name, headers=headers, json=payload)
return response.json()
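# Illustrative request/response shape for the translation model used below. The
# indexing in translate_hf assumes this form; actual fields can vary by model,
# and the API may instead return an {"error": ...} dict while the model loads:
#   query_hf({"inputs": "こんにちは！", "options": {"wait_for_model": True}},
#            "Helsinki-NLP/opus-mt-ja-en")
#   -> [{"translation_text": "Hello!"}]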
def translate_hf(input_text):
print("Translating... ", input_text)
sentences = split_sentences_ginza(input_text) # split into sentences
translated_sentences = []
print("Split sentences... ", sentences)
for sentence in sentences:
if sentence.strip(): # Ensure sentence is not empty
# API Request for each sentence:
response = query_hf({
"inputs": sentence.strip(),
"options": {"wait_for_model": True}
}, "Helsinki-NLP/opus-mt-ja-en")
print("response: ", response)
translated_sentence = response[0]["translation_text"]
translated_sentences.append(translated_sentence)
# Join the translated sentences
translation = ' '.join(translated_sentences)
return translation
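# Illustrative end-to-end behaviour (assuming the API responds normally): each
# sentence is translated on its own and the pieces are joined with single spaces,
# e.g. translate_hf("こんにちは！元気ですか？") -> "Hello! How are you?"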
def translate_openai(input_text):
prompt = "Translate the following text into Japanese language: " + input_text
response = client.chat.completions.create(
messages=[
{
"role": "user",
"content": prompt,
}
],
model="gpt-3.5-turbo",
temperature=0 # should be the same every time
)
translation = response.choices[0].message.content
print("GPT translation:", translation)
return translation
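# Note: translate_openai is an alternative GPT-based translator; it is not called
# by assess() or the Gradio interface below, which use translate_hf to produce
# the reference translation.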
def assess(original_japanese, student_translation):
try:
# get the English translation
generated_translation = translate_hf(original_japanese)
except Exception as e:
return "Error in processing translation.", str(e)
print("Generated translation:", generated_translation)
try:
prompt = (f"Evaluate the student's English translation of Japanese for accuracy and naturalness. "
f"Original: {original_japanese}, "
f"Reference Translation: {generated_translation}, "
f"Student Translation: {student_translation}. "
"Highlight errors, suggest improvements, and note any nuances. Provide concise and very simple feedback for an English language learner aimed at improving their translation skills. Where possible, give concrete examples.")
print(prompt)
# Evaluating the student's translation attempt
response = client.chat.completions.create(
messages=[
{
"role": "user",
"content": prompt,
}
],
model="gpt-3.5-turbo",
)
print("Full GPT response:", response)
evaluation_feedback = response.choices[0].message.content
print("GPT feedback:", evaluation_feedback)
return generated_translation, evaluation_feedback
except Exception as e:
return "Error in processing evaluation.", str(e)
assessor = gr.Interface(fn=assess,
inputs=[
gr.Textbox(label="Japanese Sentence Input", placeholder="Input text to be translated", lines=1, value="ใใใฏไพใงใ"),#example_Japanese),#"
gr.Textbox(label="Student's Translation Attempt", placeholder="Input your English translation", lines=1, value="This is an example")#"This is an example")
],
outputs=[
gr.Textbox(label="Machine Generated Translation"),
gr.Textbox(label="Evaluation Feedback")
],
title="Translation Practice",
description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback."
)
assessor.launch(debug=True, share=True)
#assessor.launch(debug=True)