chat history added
app.py CHANGED
@@ -7,7 +7,6 @@ import soundfile as sf
 from pdfminer.high_level import extract_text
 from llama_cpp import Llama
 
-
 # Check if MPS is available and set the device
 if torch.backends.mps.is_available():
     device = torch.device("mps")
@@ -26,7 +25,12 @@ def toText(audio):
     return question
 
 
+# Global variable to store chat history
+chat_history = []
+
 def extract_answer(question, text):
+    global chat_history
+
     # Load the LLaMA model
     model_path="/Users/chandima/.cache/lm-studio/models/lmstudio-community/Llama-3.2-3B-Instruct-GGUF/Llama-3.2-3B-Instruct-Q3_K_L.gguf"
     # Load the LLaMA model with MPS acceleration
@@ -40,21 +44,28 @@ def extract_answer(question, text):
         use_mmap=True, # Optional: for faster loading
     )
 
+    # Construct the conversation history
+    conversation = "\n".join([f"Human: {q}\nAI: {a}" for q, a in chat_history])
+
     # Use LLaMA to extract skills
     prompt = f"""
-
+    You are an AI assistant answering questions based on a resume. Here's the conversation so far:
+
+    {conversation}
 
-
-    {question}:
+    Human: {question}
 
     Resume:
     {text}
 
-
-    """
+    AI: """
 
     response = llm(prompt, max_tokens=800, stop=["Human:", "\n\n"])
     answer = response['choices'][0]['text'].strip()
+
+    # Append the new question and answer to the chat history
+    chat_history.append((question, answer))
+
     print(answer)
     return answer
 
@@ -79,6 +90,11 @@ def clone(audio, file):
 def start_recording():
     return None
 
+def reset_conversation():
+    global chat_history
+    chat_history = []
+    return None
+
 with gr.Blocks() as iface:
     with gr.Row():
         audio_input = gr.Audio(sources="microphone", type="filepath", label='Question from Resume')
@@ -96,4 +112,8 @@ with gr.Blocks() as iface:
     # Add event to start recording after output audio finishes
    output.play(fn=start_recording, outputs=audio_input)
 
+    # Add a button to reset the conversation
+    reset_btn = gr.Button("Reset Conversation")
+    reset_btn.click(fn=reset_conversation, inputs=None, outputs=None)
+
 iface.launch()
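In short, the change keeps a module-level chat_history list of (question, answer) pairs, replays it into every prompt, and appends the new turn after each model response. Below is a minimal, self-contained sketch of that pattern; it stubs out the llama_cpp call so it runs without the model file, and fake_llm / answer_question are illustrative names, not part of the app.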
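# Sketch of the chat-history pattern introduced in this commit.
# fake_llm stands in for llama_cpp.Llama.__call__ and returns the same
# response shape the app indexes into; swap in a real Llama instance to
# reproduce the actual behavior.

chat_history = []  # (question, answer) tuples, as in the diff

def fake_llm(prompt, max_tokens=800, stop=None):
    # Stand-in for the model call; the app reads response['choices'][0]['text'].
    return {"choices": [{"text": " Stubbed answer based on the resume."}]}

def answer_question(question, resume_text):
    global chat_history
    # Replay prior turns so the model sees the conversation so far.
    conversation = "\n".join(f"Human: {q}\nAI: {a}" for q, a in chat_history)
    prompt = f"""
    You are an AI assistant answering questions based on a resume. Here's the conversation so far:

    {conversation}

    Human: {question}

    Resume:
    {resume_text}

    AI: """
    response = fake_llm(prompt, max_tokens=800, stop=["Human:", "\n\n"])
    answer = response["choices"][0]["text"].strip()
    chat_history.append((question, answer))  # remember this turn
    return answer

answer_question("What is the candidate's name?", "Jane Doe. Python, 8 years.")
answer_question("How many years of experience?", "Jane Doe. Python, 8 years.")
print(chat_history)  # both turns are now replayed into the next prompt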
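Two design points are worth noting. The stop list ["Human:", "\n\n"] is what keeps the model from generating the next human turn itself, though stopping on any blank line can also truncate multi-paragraph answers. And because chat_history is a module-level global, every visitor to the Space shares one conversation, so reset_conversation clears it for everyone; per-session state in Gradio would typically go through gr.State instead.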