Spaces:
Runtime error
Runtime error
working
Browse files
app.py
CHANGED
@@ -12,6 +12,7 @@ from dotenv import load_dotenv
|
|
12 |
import os
|
13 |
from openai import OpenAI
|
14 |
import spacy
|
|
|
15 |
|
16 |
# Load environment variables from .env file
|
17 |
load_dotenv()
|
@@ -33,45 +34,26 @@ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
|
|
33 |
# Module-wide switch: set to False to silence all debug_print output.
DEBUG_MODE = True


def debug_print(*args, **kwargs):
    """Forward positional/keyword arguments to print() while DEBUG_MODE is on.

    Acts as a no-op when DEBUG_MODE is False, so call sites never need
    their own guard.
    """
    if DEBUG_MODE:
        print(*args, **kwargs)
|
39 |
-
|
40 |
-
def split_sentences_ginza(input_text):
    """Split Japanese text into sentences with the ja_core_news_sm spaCy model.

    Parameters
    ----------
    input_text : str
        Raw Japanese text.

    Returns
    -------
    list[str]
        One string per detected sentence.
    """
    # Cache the pipeline on the function object: spacy.load() reads the whole
    # model from disk, and the original code paid that cost on every call.
    nlp = getattr(split_sentences_ginza, "_nlp", None)
    if nlp is None:
        nlp = spacy.load("ja_core_news_sm")
        split_sentences_ginza._nlp = nlp
    doc = nlp(input_text)
    return [sent.text for sent in doc.sents]
|
45 |
|
46 |
-
def query_hf(payload, model_name):
    """POST `payload` to the Hugging Face inference API for `model_name`.

    Parameters
    ----------
    payload : dict
        JSON-serializable request body (e.g. {"inputs": ..., "options": ...}).
    model_name : str
        Model path appended to the module-level API_URL.

    Returns
    -------
    The decoded JSON response from the inference endpoint.
    """
    # requests has no default timeout; without one a stalled endpoint would
    # hang the app indefinitely. 60s is generous for wait_for_model requests.
    response = requests.post(API_URL + model_name, headers=headers,
                             json=payload, timeout=60)
    return response.json()
|
50 |
|
51 |
-
|
52 |
-
|
|
|
|
|
|
|
|
|
53 |
|
54 |
-
sentences = split_sentences_ginza(input_text) # split into sentences
|
55 |
-
translated_sentences = []
|
56 |
|
57 |
-
|
|
|
|
|
|
|
58 |
|
59 |
-
for sentence in sentences:
|
60 |
-
if sentence.strip(): # Ensure sentence is not empty
|
61 |
-
# API Request for each sentence:
|
62 |
-
response = query_hf({
|
63 |
-
"inputs": sentence.strip(),
|
64 |
-
"options": {"wait_for_model": True}
|
65 |
-
}, "Helsinki-NLP/opus-mt-ja-en")
|
66 |
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
|
71 |
-
# Join the translated sentences
|
72 |
-
translation = ' '.join(translated_sentences)
|
73 |
-
|
74 |
-
return translation
|
75 |
|
76 |
|
77 |
def translate_openai(input_text):
|
@@ -134,8 +116,8 @@ def assess(original_japanese, student_translation):
|
|
134 |
|
135 |
assessor = gr.Interface(fn=assess,
|
136 |
inputs=[
|
137 |
-
gr.Textbox(label="
|
138 |
-
gr.Textbox(label="Student
|
139 |
],
|
140 |
outputs=[
|
141 |
gr.Textbox(label="Machine Generated Translation"),
|
@@ -145,4 +127,69 @@ assessor = gr.Interface(fn=assess,
|
|
145 |
description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback."
|
146 |
)
|
147 |
|
148 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
import os
|
13 |
from openai import OpenAI
|
14 |
import spacy
|
15 |
+
import random
|
16 |
|
17 |
# Load environment variables from .env file
|
18 |
load_dotenv()
|
|
|
34 |
# Global variable to control debug printing
|
35 |
DEBUG_MODE = True
|
36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
|
|
|
|
|
|
|
|
38 |
|
39 |
+
# Fallback value: if the file cannot be read, the original code left
# file_content unbound and the assignment below raised NameError.
file_content = ""
try:
    file_path = "momotaro.txt"
    # Read the whole short story into memory. Explicit UTF-8 so Japanese
    # text loads correctly regardless of the platform's default encoding.
    with open(file_path, 'r', encoding='utf-8') as file:
        file_content = file.read()
except Exception as e:
    print("Error loading short story file.", str(e))

# Default learning content shown in the UI textboxes.
learning_content = file_content
|
51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
|
53 |
+
def debug_print(*args, **kwargs):
    """Emit diagnostic output via print() only when DEBUG_MODE is enabled."""
    if not DEBUG_MODE:
        return
    print(*args, **kwargs)
|
56 |
|
|
|
|
|
|
|
|
|
57 |
|
58 |
|
59 |
def translate_openai(input_text):
|
|
|
116 |
|
117 |
assessor = gr.Interface(fn=assess,
|
118 |
inputs=[
|
119 |
+
gr.Textbox(label="Learning Content", placeholder="Enter content to focus on here", lines=4, value=learning_content),#example_Japanese),#"
|
120 |
+
gr.Textbox(label="Student Data", placeholder="Enter student data here", lines=4, value="EFL Learner")#"This is an example")
|
121 |
],
|
122 |
outputs=[
|
123 |
gr.Textbox(label="Machine Generated Translation"),
|
|
|
127 |
description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback."
|
128 |
)
|
129 |
|
130 |
+
|
131 |
+
def respond(learner_data, learning_content, teacher_prompt, message, chat_history):
    """Generate one chatbot turn and append it to the Gradio chat history.

    Builds a single-user-message prompt from the teacher/learner/content
    textboxes plus the student's message, queries the OpenAI chat API, and
    returns the updated history (list of (user, bot) tuples).
    """
    prompt = (
        f"Teacher prompt: {teacher_prompt}\n"
        f"Learner data: {learner_data}\n"
        f"Learning content: {learning_content}\n"
        f"Student: {message}\n"
        "Chatbot:"
    )

    # Single-turn call to the OpenAI API; `client` is the module-level
    # OpenAI client configured at startup.
    response = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="gpt-3.5-turbo",
        temperature=0.7,
        max_tokens=150,
        # NOTE(review): stopping on "\n" limits replies to one line — confirm
        # this is intended before relying on multi-line answers.
        stop=["\n", " Student:", " Chatbot:"],
    )

    bot_message = response.choices[0].message.content

    chat_history.append((message, bot_message))
    debug_print(chat_history)
    return chat_history
|
164 |
+
|
165 |
+
# Chat UI: three context textboxes feed the respond() handler together with
# the student's message and the running chat history.
with gr.Blocks() as demo:
    with gr.Row():
        learner_data = gr.Textbox(label="Learner Data", placeholder="Enter learner data here...", lines=4, value="Honoka is a Japanese EFL student.")
        learning_content = gr.Textbox(label="Learning Content", placeholder="Enter learning content here...", lines=4, value=learning_content)
        teacher_prompt = gr.Textbox(label="Teacher Prompt", placeholder="Enter chat guidance here...", lines=4,
                                    value="You are a professional EFL teacher. Guide the conversation to discuss the learning content below.")

    chatbot = gr.Chatbot()

    with gr.Row():
        msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")
        submit_button = gr.Button("Send")

    # Clicking Send routes all context boxes + the message + current history
    # into respond(); the returned history re-renders the Chatbot widget.
    submit_button.click(
        respond,
        inputs=[learner_data, learning_content, teacher_prompt, msg, chatbot],
        outputs=chatbot,
    )

    clear = gr.ClearButton([msg])


if __name__ == "__main__":
    demo.launch(debug=True)
|