from datasets import load_dataset
from datasets import Dataset
from sentence_transformers import SentenceTransformer
import faiss
import time
from datetime import datetime
import json
#import torch
import uuid
import pandas as pd
from llama_cpp import Llama
#from langchain_community.llms import LlamaCpp
from threading import Thread
from huggingface_hub import Repository, upload_file
import os

HF_TOKEN = os.getenv('HF_Token')
#Log_Path="./Logfolder"
logfile = 'DiabetesChatLog.txt'

data = load_dataset("Namitg02/Test", split='train', streaming=False)
# Returns a Dataset object; each row behaves like a dictionary.
length = len(data)

embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
embedding_dim = embedding_model.get_sentence_embedding_dimension()
# Returns the dimension of the embeddings

index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)
# Builds a FAISS index over the "embeddings" column

#question = "How can I reverse Diabetes?"

SYS_PROMPT = """You are an assistant for answering questions like a medical professional.
You are given extracted parts of a document, a question, and the history of previous questions and answers.
Provide a brief conversational answer. If you do not know the answer, just say "I do not know." Do not make up an answer.
Don't repeat the SYS_PROMPT or say that you are referring to a document or an article."""
# Provides context for how to answer the question

#llm_model = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF", tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf
# TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF , TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF

model = Llama(
    model_path="./llama-2-7b-chat.Q4_K_M.gguf",
#    chat_format="llama-2",
    n_gpu_layers=0,
    temperature=0.75,
    n_ctx=4096,
    top_p=0.95 #,
#    eos_tokens=terminators
#    callback_manager=callback_manager,
#    verbose=True,  # Verbose is required to pass to the callback manager
)
# Initialise the model (llama.cpp bundles its own tokenizer)

def search(query: str, k: int = 2):
    """Embed a new query and return the most probable results."""
    embedded_query = embedding_model.encode(query)  # create embedding of the new query
    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
        "embeddings", embedded_query,  # compare the embedded query with the dataset embeddings
        k=k  # get only the top k results
    )
    return scores, retrieved_examples
# Returns scores (List[float]): the retrieval scores from FAISS (IndexFlatL2 by default)
# and retrieved_examples (dict): the matching rows. Called by the talk function, which passes the prompt.

def format_prompt(prompt, retrieved_documents, k, history, memory_limit=3):
    """Use the retrieved documents to build the prompt the model will answer."""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['0'][idx]}\n"  # '0' is the text column in this dataset
    print("historyinfo")
    print(f"{history}")
    if len(history) == 0:
        return PROMPT
    if len(history) > memory_limit:
        history = history[-memory_limit:]
    print("checkwohist")
#    PROMPT = PROMPT + f"{history[0][0]} [/INST] {history[0][1]} "
    # Handle conversation history
    for user_message, bot_message in history[0:]:
        PROMPT += f"[INST] {user_message} [/INST] {bot_message} "
    print("checkwthhist2")
    return PROMPT
# Called by the talk function to add retrieved documents to the prompt.
# Keeps appending the text of the retrieved documents to the prompt string.
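
# The unused `upload_file` / HF_TOKEN imports and the commented-out HfApi and
# push_to_hub blocks further down suggest the chat log was meant to be pushed to
# the Hub. Below is a minimal sketch of that step, assuming the target repo
# "Namitg02/Logfile" (taken from the commented push_to_hub call at the bottom)
# already exists as a dataset repo and that HF_TOKEN has write access. The helper
# is defined but never called, so it does not change the app's behaviour.
def push_log_to_hub(local_path: str, repo_id: str = "Namitg02/Logfile"):
    """Upload one chat-log file to a Hugging Face dataset repo (sketch, not wired in)."""
    upload_file(
        path_or_fileobj=local_path,                 # e.g. the unique_filename written in talk()
        path_in_repo=os.path.basename(local_path),  # keep the same file name in the repo
        repo_id=repo_id,                            # assumed target repo
        repo_type="dataset",
        token=HF_TOKEN,
    )
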
def talk(prompt, history):
    k = 2  # number of retrieved documents
    scores, retrieved_documents = search(prompt, k)  # get retrieval scores and examples (dict) based on the prompt passed
    print(retrieved_documents.keys())
#    print("check4")
    formatted_prompt = format_prompt(prompt, retrieved_documents, k, history, memory_limit=3)  # create a new prompt using the retrieved documents
    print("check5")
    pd.options.display.max_colwidth = 4000
    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
    print(messages)
    # Binds the system context and the new prompt for the LLM;
    # the chat template structure should match the text generation model's format.
    # "</s>" indicates the end of a sequence
    stream = model.create_chat_completion(messages=messages, max_tokens=400, stop=["</s>"], stream=False)
#    print(f"{stream}")
    print("check 7")
    print(stream['choices'][0]['message']['content'])
    response = stream['choices'][0]['message']['content']

#    for user_message, bot_message in history[0:]:
#        historylog += f"[INST] {user_message} [/INST] {bot_message} "
    historylog = ''
    historylog += f"{prompt} \n {response} "
    print("history log")
    print(str(historylog))
    print("history log string printed")

    try:
        # write the history log to a file
        unique_filename = f"file_{uuid.uuid4()}.txt"
#        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#        filename = f"file_{timestamp}.txt"
        with open(unique_filename, "a") as f:
            f.write(historylog)
        # Read the contents of the file back to display it.
        print("History log printed:")
        with open(unique_filename, "r") as f:
            print(f.read())
#        with open("./file.txt", "a") as f:
#            f.write(historylog)
#        print("History log printed:")
#        with open("./file.txt", "r") as f:
#            print(f.read())
    except IOError as e:
        print(f"An error occurred: {e}")

#    from huggingface_hub import HfApi
#    api = HfApi()
#    api.upload_file(
#        path_or_fileobj="./file.txt",
#        path_in_repo="file.txt",
#        repo_id="Namitg02/Test",
#        repo_type="space"
#    )
    print("upload section passed")

    for i in range(len(response)):
        time.sleep(0.05)
        yield response[: i + 1]
# Calls the model to generate a response based on the message/input.
# do_sample, if set to True, uses strategies to select the next token from the probability distribution over the entire vocabulary.
# temperature controls randomness: more randomness with higher temperature.
# Only the tokens comprising the top_p probability mass are considered for responses.
# This output is a data structure containing all the information returned by generate(), but it can also be used as a tuple or dictionary.

TITLE = "AI Copilot for Diabetes Patients"
DESCRIPTION = "I provide answers to concerns related to Diabetes"

import gradio as gr
# Design the chatbot
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="Soft",
    examples=[["what is Diabetes?"]],
    title=TITLE,
    description=DESCRIPTION,
)
# Launches the chatbot and calls the talk function, which in turn calls the other functions

print("check14")
#memory_panda = pd.DataFrame(historylog)
#Logfile = Dataset.from_pandas(memory_panda)
#Logfile.push_to_hub("Namitg02/Logfile", token=HF_TOKEN)
demo.launch()
#demo.launch(auth=("namit", "wolfmagic"))