# Minimal Streamlit chatbot backed by mistralai/Ministral-8B-Instruct-2410
# via Hugging Face Transformers.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "mistralai/Ministral-8B-Instruct-2410"

# Cache the tokenizer and model so Streamlit does not reload them on every rerun.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
    return tokenizer, model

tokenizer, model = load_model()
# Build a single-turn reply: template the prompt, generate, and decode.
def generate_response(prompt):
    # This is an instruct model, so wrap the prompt in its chat template.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    # max_new_tokens bounds the reply itself; max_length would also count prompt tokens.
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_new_tokens=150)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
st.title("Chatbot Mistral") |
|
st.write("Posez une question au chatbot :") |
|
|
|
|
|
user_input = st.text_input("Vous :") |
|
|
|
if st.button("Envoyer"): |
|
if user_input: |
|
with st.spinner("Génération de la réponse..."): |
|
response = generate_response(user_input) |
|
st.write("Chatbot :", response) |
|
else: |
|
st.write("Veuillez entrer un message.") |
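# To launch (the filename is an assumption; use whatever this script is saved as):
#   streamlit run chatbot.py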