import gradio as gr
from dotenv import load_dotenv
from langchain import PromptTemplate, LLMChain, HuggingFaceHub
from langchain.llms import CTransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
from langchain.llms.huggingface_pipeline import HuggingFacePipeline

# Load API keys and other settings from a local .env file.
load_dotenv()


def generate_prompts(user_input):
    """Generate a list of question prompts about the user's topic."""
    # Use the "Question" template variable instead of interpolating
    # user_input directly, so the template matches input_variables.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="List 10 question prompts for {Question}.",
    )
    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
    # Note: multi-file GGUF repos may also need a specific model_file argument.
    llm = CTransformers(model="TheBloke/Mistral-7B-Instruct-v0.1-GGUF", config=config)
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)

    input_data = {"Question": user_input}
    generated_prompts = hub_chain.run(input_data)

    # Keep non-empty lines, prefix each with "Question:", then split the
    # result back into a clean list of question strings.
    questions_list = generated_prompts.split('\n')
    formatted_questions = "\n".join(
        f"Question: {question}" for question in questions_list if question.strip()
    )
    questions_list = formatted_questions.split("Question:")[1:]
    return questions_list


def answer_question(prompt):
    """Answer a single generated question with a Llama-2 chat model."""
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="Answer '{Question}' and do not consider the number behind it.",
    )
    config = {'max_new_tokens': 64, 'temperature': 0.7, 'context_length': 64}
    llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML", config=config)
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)

    input_data = {"Question": prompt}
    generated_answer = hub_chain.run(input_data)
    return generated_answer


text_list = []


def updateChoices(prompt):
    # Refresh the checkbox group with questions generated for the new topic.
    newChoices = generate_prompts(prompt)
    return gr.CheckboxGroup(choices=newChoices)


def setTextVisibility(cbg):
    # Show one answer textbox per selected question and hide the rest
    # (the UI allocates 10 textboxes in total).
    update_show = [
        gr.Textbox(visible=True, label=text, value=answer_question(text))
        for text in cbg
    ]
    update_hide = [gr.Textbox(visible=False, label="") for _ in range(10 - len(cbg))]
    return update_show + update_hide


with gr.Blocks() as demo:
    gr.HTML("""