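"""Urdu voice chatbot for Streamlit.

Records speech in the browser, transcribes it with Google Speech Recognition,
generates an Urdu reply with Claude, and reads the reply back with gTTS.

Assumed dependencies (PyPI names inferred from the imports below):
streamlit, audio-recorder-streamlit, SpeechRecognition, gTTS, anthropic.
"""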
import streamlit as st
from audio_recorder_streamlit import audio_recorder
import speech_recognition as sr
from gtts import gTTS
import tempfile
import os
from anthropic import Anthropic  # Client for the Claude Haiku model

# Claude API setup
CLAUDE_API_KEY = st.secrets["claude_api_key"]  # Store your Claude API key in Streamlit secrets
client = Anthropic(api_key=CLAUDE_API_KEY)
# Main function for the chatbot app
def main():
    st.title("🎤 اردو وائس چیٹ بوٹ")  # "Urdu Voice Chatbot"

    # Sidebar with information
    st.sidebar.title("حامش راج")
    st.sidebar.write("ماہر ڈیٹا سائنس اور جنریٹو اے آئی")  # "Expert in Data Science and Generative AI"

    st.markdown("**اپنی آواز ریکارڈ کریں اور جواب حاصل کریں**")  # "Record your voice and get a reply"

    # Audio recorder
    audio_data = audio_recorder(
        text="اردو میں بولیئے",  # "Speak in Urdu"
        icon_size="2x",
        icon_name="microphone-lines",
        key="urdu_recorder",
    )

    if audio_data is not None:
        # Save the recorded audio to a temporary file
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio_file:
            temp_audio_file.write(audio_data)
            temp_audio_file_path = temp_audio_file.name

        # Convert audio to text (speech-to-text in Urdu)
        user_input_text = convert_audio_to_text(temp_audio_file_path)

        # Display the transcribed user input
        st.write(f"**آپ نے کہا:** {user_input_text}")  # "You said:"

        # Get the LLM (Claude) response
        response_text = get_claude_response(user_input_text)

        # Display the chatbot's text response
        st.write(f"**جواب:** {response_text}")  # "Answer:"

        # Convert the response text to audio and play it
        response_audio = convert_text_to_audio(response_text)
        if response_audio is not None:
            st.audio(response_audio)

        # Clean up the temporary audio file
        os.remove(temp_audio_file_path)
# Function to convert audio to text (Urdu speech recognition)
def convert_audio_to_text(audio_file_path):
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_file_path) as source:
        audio_data = recognizer.record(source)
    try:
        text = recognizer.recognize_google(audio_data, language="ur")
        return text
    except sr.UnknownValueError:
        return "معذرت، میں آپ کی آواز سمجھ نہیں سکا"  # "Sorry, I could not understand your voice"
    except sr.RequestError:
        return "معذرت، سرور دستیاب نہیں ہے"  # "Sorry, the server is not available"
# Function to get a response from Claude
def get_claude_response(prompt_text):
    prompt = f"براہ کرم اردو میں جواب دیں: {prompt_text}"  # "Please reply in Urdu:"
    try:
        # Query the Claude Haiku model via the Messages API
        response = client.messages.create(
            model="claude-3-haiku-20240307",
            max_tokens=100,
            messages=[{"role": "user", "content": prompt}],
        )
        return response.content[0].text
    except Exception as e:
        return f"خطا: {str(e)}"  # "Error:"
# Function to convert text to Urdu audio (text-to-speech)
def convert_text_to_audio(text):
    try:
        tts = gTTS(text=text, lang="ur")
        # Reserve a temporary file path, then let gTTS write the MP3 to it
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_audio_file:
            temp_audio_path = temp_audio_file.name
        tts.save(temp_audio_path)
        return temp_audio_path
    except Exception:
        return None


if __name__ == "__main__":
    main()
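# Usage (a sketch, assuming this file is saved as app.py and the API key is
# stored in .streamlit/secrets.toml under the claude_api_key entry read above):
#   streamlit run app.py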