Create app.py
app.py
ADDED
@@ -0,0 +1,156 @@
import gradio as gr
import openai
import requests
import json
import os
from duckduckgo_search import ddg  # ddg() ships only with older duckduckgo_search releases

# Read the OpenAI API key from the environment (e.g. a Space secret) instead of hardcoding it in the repo.
openai.api_key = os.getenv("OPENAI_API_KEY")
def search_duckduckgo(query):
    # Run a DuckDuckGo text search and keep only the title and body of each hit.
    results = ddg(query, region='wt-wt', safesearch='Off', time='m')
    filtered_results = [{"title": res["title"], "body": res["body"]} for res in results]
    print(filtered_results)
    return filtered_results
def get_search_query(task):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": f"Given the task: {task}. Generate a concise search query with 1-3 keywords."}
        ]
    )
    search_query = response.choices[0]['message']['content'].strip()
    print("Agent 2: ", search_query)
    return search_query
def summarize_search_result(task, search_result):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": f"Given the task '{task}' and the search result '{json.dumps(search_result)}', provide a summarized result."}
        ]
    )
    summary = response.choices[0]['message']['content'].strip()
    return summary
def agent_1(objective):
    # Task-creation agent: turn the objective into a list of related tasks.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": f"Given the objective '{objective}', create a list of tasks that are closely related to the objective. If needed, add specific keywords from the objective to the task sentence."}
        ]
    )
    tasks = response.choices[0]['message']['content'].strip().split('\n')
    return tasks
def agent_2(task):
    # Execution agent: search the web for the task and summarize what comes back.
    search_query = get_search_query(task)
    print("Agent 2")
    print(search_query)
    search_results = search_duckduckgo(search_query)
    summarized_result = summarize_search_result(task, search_results)
    print(summarized_result)
    return summarized_result
def agent_3(objective, last_result, tasks):
    # Prioritization agent: re-rank the task list against the objective and the latest result.
    task_list = '\n'.join(tasks)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": f"Given the objective '{objective}', the last result '{json.dumps(last_result)}', and the task list:\n{task_list}\n\nRank the tasks based on their relevance to the objective."}
        ]
    )
    modified_tasks = response.choices[0]['message']['content'].strip().split('\n')
    print("Agent 3")
    print(modified_tasks)
    return modified_tasks
def summarize_result(objective, result):
    # Helper to summarize a single result against the objective (not called elsewhere in this file).
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": f"Given the objective '{objective}' and the final result '{json.dumps(result)}', provide a summary."}
        ]
    )
    summary = response.choices[0]['message']['content'].strip()
    return summary
def generate_final_answer(objective, all_results):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": f"Given the objective '{objective}' and the collected results '{json.dumps(all_results)}', provide a final answer addressing the objective."}
        ]
    )
    final_answer = response.choices[0]['message']['content'].strip()
    return final_answer
def main(objective, loop_count):
    tasks = agent_1(objective)
    all_results = []
    completed_tasks = []
    for i in range(loop_count):
        print(i + 1)
        if i < len(tasks):
            print('NEXT TASK: ', tasks[i])
            completed_tasks.append(tasks[i])
            result = agent_2(tasks[i])
            all_results.append(result)
            # Re-rank the remaining tasks after each execution step.
            tasks = agent_3(objective, result, tasks)
            print('*********************')
        else:
            break

    final_answer = generate_final_answer(objective, all_results)
    return final_answer, completed_tasks, tasks
def getbabyagianswer(objective, loop_count, openapikey):
    # The loop count arrives as text from the Gradio textbox.
    loop_count = int(loop_count)
    # Prefer the key supplied through the UI; otherwise keep the environment-provided key.
    if openapikey:
        openai.api_key = openapikey
    final_summary, completed_tasks, all_tasks = main(objective, loop_count)
    print("Final Summary:", final_summary)
    return final_summary, completed_tasks, all_tasks
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>GPT4 created BabyAGI</center></h1>")
    gr.Markdown(
        """This is part of a series of experiments that use BabyAGI as a "framework" to construct focused use cases (e.g. idea generation). Here, GPT-4 was prompted to create a BabyAGI with task-creation & execution agents, but constrained to give an answer within a specified number of loops. Unlike the original BabyAGI concept, this is not open-ended.\n\nNote: This is a series of experiments to understand AI agents, so do check the quality of the output. OpenAI agents (gpt-3.5-turbo) & DuckDuckGo search are used. The analysis takes roughly 120 secs & may not always be consistent. An error occurs when the OpenAI API key is not provided, the ChatGPT API is overloaded, or ChatGPT is unable to correctly decipher & format the output.\n\n[visitors](https://visitor-badge.glitch.me/badge?page_id=hra/GPT4-makes-BabyAGI)"""
    )

    with gr.Row() as row:
        with gr.Column():
            textboxtopic = gr.Textbox(placeholder="Enter Topic for Curriculum...", lines=1, label='Topic')
        with gr.Column():
            # Must be its own variable; the original reused textboxtopic, which left textboxloopcount undefined in btn.click below.
            textboxloopcount = gr.Textbox(placeholder="Enter # of loops...", lines=1, label='Loop Count')
        with gr.Column():
            textboxopenapi = gr.Textbox(placeholder="Enter OpenAI API Key...", lines=1, label='OpenAI API Key')
    with gr.Row() as row:
        examples = gr.Examples(
            examples=['Give me a startup idea in healthcare technology for India',
                      'Which is a must see destination in Mysore?',
                      'Find me a unique cuisine restaurant in bangalore',
                      'Give me a startup idea for AI in music streaming'],
            inputs=[textboxtopic])
    with gr.Row() as row:
        btn = gr.Button("Unleash AI Agent")

    with gr.Row() as row:
        with gr.Column():
            answer1 = gr.Textbox(placeholder="", lines=1, label='Answer')
        with gr.Column():
            fulltasklist1 = gr.Textbox(placeholder="", lines=1, label='Full Task List')
        with gr.Column():
            completedtasklist1 = gr.Textbox(placeholder="", lines=1, label='Completed Tasks')
    # Output order matches the tuple returned by getbabyagianswer:
    # (final_summary, completed_tasks, all_tasks) -> (Answer, Completed Tasks, Full Task List).
    btn.click(getbabyagianswer,
              inputs=[textboxtopic, textboxloopcount, textboxopenapi],
              outputs=[answer1, completedtasklist1, fulltasklist1])

demo.launch(debug=True)
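This commit only adds app.py, so the dependency pins below are not part of the commit; they are a minimal requirements.txt sketch, assuming the pre-1.0 openai SDK (which still exposes openai.ChatCompletion) and a duckduckgo_search release that still ships the ddg() helper used above.

# requirements.txt (sketch; version bounds are assumptions, not from this commit)
gradio
openai<1.0            # the 1.0+ SDK removes openai.ChatCompletion
duckduckgo_search<3   # newer releases replace ddg() with the DDGS class
requests

With the hardcoded key removed, OPENAI_API_KEY can be supplied either as a Space secret (read via os.getenv at startup) or typed into the key textbox in the UI.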