BeTaLabs committed
Commit bddd855
1 Parent(s): 94e688e

Update llm_handler.py

Files changed (1)
  1. llm_handler.py +83 -123
llm_handler.py CHANGED
@@ -1,125 +1,85 @@
- # Import necessary libraries and modules
- import json  # Used for encoding and decoding JSON data
- import numpy as np  # Provides support for large, multi-dimensional arrays and matrices
- from wiki import search as search_wikipedia  # Import the search function from the wiki module and rename it
- from concurrent.futures import ThreadPoolExecutor  # Import ThreadPoolExecutor for concurrent execution
- from llm_handler import send_to_llm  # Import the send_to_llm function from the llm_handler module
- from params import OUTPUT_FILE_PATH, NUM_WORKERS, PROVIDER  # Import constants from the params module
-
- # Set the provider for the language model to "local-model"
- PROVIDER = "local-model"
-
- # Import system messages from the system_messages module
- from system_messages import (
-     SYSTEM_MESSAGES_VODALUS,
- )
- from topics import TOPICS  # Import topics from the topics module
-
- # Set the system messages to those specified in SYSTEM_MESSAGES_VODALUS
- SYSTEM_MESSAGES = SYSTEM_MESSAGES_VODALUS
-
- # Define a long multi-line string as a prompt for generating data
- PROMPT_1 = """
- For the following SUBJECT_AREA, generate a question that covers a very narrow topic in the SUBJECT_AREA, with sufficient depth and breadth. The topic in the question should be important to the SUBJECT_AREA, with known answers present. The generated question should be detailed, seek the true nature of our universe from first principles, be curiosity-invoking and thought-provoking, and also be answerable by an intelligence like yourself. Make sure the question is sufficiently hard and multi-part, like a graduate-level course question. Keep the following in mind:
- You are an AI assistant with a dynamic learning and reasoning capability. Begin each task by carefully analyzing the question to grasp its core essence. Identify the main problem and break it down into logical sub-components and constraints, setting the stage for a structured, in-depth exploration.
- - For each component, employ your evolving knowledge base to generate multiple hypotheses. Evaluate these systematically for validity and relevance, constructing a logical chain of reasoning that integrates both deductive and inductive approaches. Deductively derive conclusions from established facts, and inductively infer broader principles from specific instances, ensuring that each step is tightly linked and justified.
- - Simultaneously, maintain a 'Tree of Thoughts' approach. Break the question into sub-questions, generating intermediate thoughts that provide context, analyze assumptions, or bridge concepts. Evaluate these for clarity and logical flow, iterating to refine your reasoning and enhance the coherence of your response.
- - Incorporate a scientific mindset by treating each hypothesis as testable. Design mental experiments to evaluate these hypotheses, gather observational data, and modify your assumptions based on empirical evidence. Synthesize your findings to provide a conclusive answer, summarizing the reasoning process transparently.
- - Throughout, adopt a conversational yet insightful tone. Weave in philosophical questioning, humorous insights, and practical examples to elucidate complex ideas in an engaging manner. Your goal is to provide clear, detailed explanations that make your reasoning process evident, fostering an understanding that resonates on both intellectual and emotional levels.
- - Finally, remember that each task is a unique learning opportunity that contributes to your holistic knowledge framework. Synthesize insights from individual tasks into your broader understanding, continually refining your cognitive processes and response strategies. Your ultimate aim is to evolve continuously, optimizing how you integrate and apply new information for comprehensive, adaptive problem-solving.
- - Never end the conversation with a 'send-off' or 'last line' that offers nothing of real value to the user.
- ####DO NOT EVER MENTION THE DATASET OR THE ACT OF CRAFTING QUESTIONS OR RESPONSES WHILE GENERATING. YOU ARE NOT ALLOWED TO BREAK THE 4TH WALL AND CONTAMINATE THE DATASET. DO NOT EVER SAY ANY PHRASES SUCH AS AND/OR SIMILAR TO: 'Here's a question that covers a very narrow topic in the SUBJECT_AREA'####
- """
-
-
- # Define a dictionary to hold context information for message generation
- msg_context = {"role": "system", "content": str(PROMPT_1)}
-
- # Define a function to generate data based on a given topic and system messages
- def generate_data(
-     topic_selected,
-     system_message_generation,
-     system_message_selected,
-     output_file_path,
-     llm_provider
- ):
-     # Fetch Wikipedia content for the selected topic
-     wikipedia_info = search_wikipedia(topic_selected)
-
-     # Format Wikipedia search results into a readable string
-     wikipedia_summary = "\n".join([f"Title: {info['title']}, Abstract: {info['abstract']}" for info in wikipedia_info])
-
-     # Append Wikipedia information to the system message generation prompt for LLM context
-     full_prompt_for_llm = f"{system_message_generation}\n\n---\nWikipedia Information to use in your response generation:\n{wikipedia_summary}"
-
-     # Create msg_context for the LLM with Wikipedia info
-     msg_context = {"role": "system", "content": full_prompt_for_llm}
-
-     # Prepare message list for the LLM to generate the question
-     msg_list = [msg_context, {"role": "user", "content": f"Generate a question based on the SUBJECT_AREA: {topic_selected}"}]
-
-     # Send to the LLM for question generation
-     question, _ = send_to_llm(llm_provider, msg_list)
-
-     # Prepare message list for the LLM to generate the answer
-     msg_list_answer = [
-         {"role": "system", "content": system_message_selected},
-         {"role": "user", "content": question}
-     ]
-
-     # Send to the LLM for answer generation
-     answer, _ = send_to_llm(llm_provider, msg_list_answer)
-
-     # Prepare data for output (excluding usage information)
      data = {
-         "system": system_message_selected,
-         "instruction": question,
-         "response": answer
      }
-
-     # Write to the output file
-     with open(output_file_path, "a") as output_file:
-         output_file.write(json.dumps(data) + "\n")
-
-     return data
-
- # Define the main function to orchestrate the data generation process
- def main():
-     nn = 0  # Counter for successful generations
-     failed = 0  # Counter for failed generations
-     with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
-         # Create a list of futures, one per worker, each with a randomly chosen topic and system message
-         futures = []
-         for _ in range(NUM_WORKERS):
-             topic_number = np.random.randint(0, len(TOPICS))
-             topic_selected = TOPICS[topic_number]
-             system_message_number = np.random.randint(0, len(SYSTEM_MESSAGES))
-             system_message_selected = SYSTEM_MESSAGES[system_message_number]
-             system_message_generation = PROMPT_1
-             futures.append(
-                 executor.submit(
-                     generate_data,
-                     topic_selected,
-                     system_message_generation,
-                     system_message_selected,
-                     OUTPUT_FILE_PATH,
-                     PROVIDER
-                 )
-             )
-
-         # Wait for all futures to complete
-         for future in futures:
-             data = future.result()
-             if data:
-                 nn += 1
-                 print(data)
-                 print(
-                     f"Generation {nn} Complete"
-                 )
-             else:
-                 failed += 1
-             print("=" * 132)
-
-
- if __name__ == "__main__":
-     main()
 
+ import requests
+ import json
+ from openai import OpenAI
+ from params import load_params
+ import logging
+
+ # Configure logging
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+
+ # Build an OpenAI-compatible client for the configured provider
+ def get_client():
+     params = load_params()
+     if params['PROVIDER'] == 'local-model':
+         return OpenAI(api_key="local-model", base_url=params['BASE_URL'])
+     return None
+
+ # Send a chat completion request to an OpenAI-compatible endpoint
+ def send_to_chatgpt(msg_list):
+     try:
+         client = get_client()
+         if client is None:
+             raise ValueError("Failed to initialize OpenAI client")
+
+         params = load_params()
+         logger.info(f"Sending request to: {params['BASE_URL']}")
+         logger.info(f"Using model: {params['MODEL']}")
+         logger.debug(f"Input messages: {json.dumps(msg_list, indent=2)}")
+
+         completion = client.chat.completions.create(
+             model=params['MODEL'],
+             temperature=params['temperature'],
+             messages=msg_list
+         )
+         chatgpt_response = completion.choices[0].message.content
+         chatgpt_usage = completion.usage
+         logger.debug(f"LLM response: {chatgpt_response}")
+         logger.debug(f"Usage: {chatgpt_usage}")
+         return chatgpt_response, chatgpt_usage
+     except requests.exceptions.RequestException as e:
+         logger.error(f"Request error in send_to_chatgpt: {str(e)}")
+         return f"Error: Connection failed - {str(e)}", None
+     except Exception as e:
+         logger.error(f"Error in send_to_chatgpt: {str(e)}")
+         return f"Error: {str(e)}", None
+
+ # Send a chat message to an AnythingLLM workspace via its REST API
+ def send_to_anything_llm(msg_list):
+     params = load_params()
+     url = f"{params['BASE_URL']}/api/v1/workspace/{params['WORKSPACE']}/chat"
+     headers = {
+         'accept': 'application/json',
+         'Authorization': f"Bearer {params['API_KEY']}",
+         'Content-Type': 'application/json'
+     }
+     # Flatten the message list into a single string, since the workspace chat endpoint takes one message
+     message_content = " ".join(msg["content"] for msg in msg_list if "content" in msg)
      data = {
+         "message": message_content,
+         "mode": "chat"
      }
+     data_json = json.dumps(data)
+     logger.debug(f"Sending to AnythingLLM: {data_json}")
+     try:
+         response = requests.post(url, headers=headers, data=data_json)
+         response.raise_for_status()
+         response_data = response.json()
+         chatgpt_response = response_data.get("textResponse")
+         chatgpt_usage = response_data.get("usage", {})
+         logger.debug(f"AnythingLLM response: {chatgpt_response}")
+         logger.debug(f"AnythingLLM usage: {chatgpt_usage}")
+         return chatgpt_response, chatgpt_usage
+     except requests.RequestException as e:
+         logger.error(f"Error in send_to_anything_llm: {str(e)}")
+         return f"Error: {str(e)}", None
+
+ # Route the message list to whichever provider is configured in params
+ def send_to_llm(msg_list):
+     params = load_params()
+     logger.info(f"Using provider: {params['PROVIDER']}")
+     if params['PROVIDER'] == "local-model":
+         return send_to_chatgpt(msg_list)
+     elif params['PROVIDER'] == "anything-llm":
+         return send_to_anything_llm(msg_list)
+     else:
+         raise ValueError(f"Unknown provider: {params['PROVIDER']}")
+
+ def send_to_llm_wrapper(msg_list):
+     logger.info("Sending message to LLM")
+     return send_to_llm(msg_list)
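
The updated send_to_llm no longer takes a provider argument; the provider is resolved from load_params() inside the module. A minimal sketch of how a caller would use it after this commit (the message contents here are illustrative placeholders, not from the repo):

    from llm_handler import send_to_llm

    # OpenAI-style chat messages: a list of {"role", "content"} dicts
    msg_list = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Generate a question about general relativity."},
    ]

    # Returns (response_text, usage). On failure the helpers log the
    # exception and return an "Error: ..." string with usage set to None.
    response, usage = send_to_llm(msg_list)
    print(response)

Note that the deleted code called send_to_llm(llm_provider, msg_list); any remaining call sites using the two-argument form would need the same update.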
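
load_params() itself is not part of this diff, so its return shape is an assumption; based on the keys the new code reads, it presumably yields a dict like the sketch below. Every value is a hypothetical example, not taken from the repo:

    # Hypothetical return value of params.load_params(); only the keys
    # referenced by llm_handler.py are shown, and all values are placeholders
    {
        "PROVIDER": "local-model",               # or "anything-llm"
        "BASE_URL": "http://localhost:1234/v1",  # OpenAI-compatible or AnythingLLM base URL
        "MODEL": "local-model",                  # model name passed to chat.completions.create
        "temperature": 0.7,                      # note the lowercase key, matching the code
        "API_KEY": "<anythingllm-api-key>",      # used only by send_to_anything_llm
        "WORKSPACE": "<workspace-slug>"          # AnythingLLM workspace slug
    }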