import json
import openai
import os
import pandas as pd
import gradio as gr
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import LLMChain, OpenAI, PromptTemplate
from datetime import datetime, date, time, timedelta
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, Document, ServiceContext
from langchain.llms import OpenAIChat
import feedparser
import numpy as np
from duckduckgo_search import ddg, ddg_videos


def get_learning_curriculum(openapikey, topic):
    dateforfilesave = datetime.today().strftime("%d-%m-%Y %I:%M%p")
    print(topic)
    print(dateforfilesave)
    if openapikey == '':
        return pd.DataFrame(["Please provide an OpenAI API key"], columns=['ERROR'])
    os.environ['OPENAI_API_KEY'] = str(openapikey)

    ### Task Creation Agent: ask the model for a curriculum as a list of tasks,
    ### each with a search keyword and questions to put to the search results
    prompt = 'You are a training center AI. Give me a detailed curriculum to learn about "{topicforquery}" using search. The curriculum will be a series of learning tasks to be achieved. Give output as a python list of jsons with "task name", "search keyword" to search to complete the task. Do not repeat the tasks. For each task name also add a list of "questions" to ask the search results data to select specific articles and complete the curriculum. Remember the search list will be a dataframe of titles & body of the searched article and you may not be able to go through the full article, hence these questions should be of types "Which article best suits a learning curriculum?", "Which article is learning oriented?". To reiterate, output should be in json with keys task name ex: get beginner training articles for painting, search keyword ex: beginner painting & questions ex: What are top articles for painting?'.format(topicforquery=topic)
    openai.api_key = os.getenv("OPENAI_API_KEY")
    resp = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": prompt}
        ]
    )
    tasklist = json.loads(resp['choices'][0]['message']['content'])
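    # The model is asked to return JSON that parses into something like the
    # illustrative (not actual) structure below; the exact tasks depend on the
    # topic and on what the model returns:
    # tasklist = [
    #     {"task name": "get beginner training articles for painting",
    #      "search keyword": "beginner painting",
    #      "questions": ["Which article best suits a learning curriculum?",
    #                    "Which article is learning oriented?"]},
    #     ...
    # ]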

    ### Function to search the internet using DuckDuckGo, exposed as a tool
    def research_search(search_keyword, question_to_ask, topic):
        llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo"))
        service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
        keyword = search_keyword
        keyword = "+".join(keyword.lower().split())
        keyword = keyword.replace(' and ', ' AND ')
        # ddg() (older duckduckgo_search API) returns a list of result dicts;
        # only the 'title' and 'body' fields are used below
        posts = ddg(keyword + ' ' + topic, safesearch='Off', page=1)
        latestnews_df = pd.DataFrame(posts)
        print(latestnews_df.columns)
        #latestnews_df=latestnews_df.drop_duplicates(subset=['title','link','published'])
        latestnews_df['text'] = 'Title: ' + latestnews_df['title'] + ' Description: ' + latestnews_df['body']
        print(latestnews_df['text'].tolist())
        # Build an in-memory vector index over the search snippets and query it
        documents = [Document(t) for t in latestnews_df['text'].tolist()]
        index = GPTSimpleVectorIndex.from_documents(documents)
        prompt_query = question_to_ask
        respstr = str(index.query(prompt_query,
                                  service_context=service_context,
                                  response_mode="tree_summarize",
                                  similarity_top_k=10))
        print("Search response: ", respstr)
        return respstr
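    # Illustrative standalone use (hypothetical arguments, not executed by the app):
    # research_search('beginner acrylic painting tutorials',
    #                 'Which article best suits a learning curriculum?',
    #                 'Acrylic painting')
    # would return a tree-summarised answer string built from the indexed snippets.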

    ### Task Execution Agent loop
    list1 = []
    list2 = []
    list3 = []
    for i in range(len(tasklist)):
        taskstuff = tasklist[i]
        search_keyword = taskstuff['search keyword']
        for question in taskstuff['questions']:
            response_string = research_search(search_keyword, question, topic)
            list1.append(taskstuff['task name'])
            list2.append(question)
            list3.append(response_string)
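    # Illustrative (not actual) shape of the table assembled below: one row per
    # (task, question) pair, e.g.
    #   Task: get beginner training articles for painting
    #   Question: Which article best suits a learning curriculum?
    #   Learning: <summarised answer from the search results>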
    ### Create dataframe to display
    outputdf = pd.DataFrame()
    outputdf['Task'] = list1
    outputdf['Question'] = list2
    outputdf['Learning'] = list3
    return outputdf


with gr.Blocks() as demo:
    gr.Markdown("<h1><center>BabyAGI creates Learning Curriculum</center></h1>")
    gr.Markdown(
        """ This is the first step of an experiment using BabyAGI as a "framework" to construct focused use cases (ex: learning curricula). The flow uses two AI agents: 1) a task creation agent that creates a task list & questions, and 2) a task execution agent that executes the tasks & finds answers to the questions. Unlike the original BabyAGI concept, this is not open-ended. \n\nNote: This is a series of experiments to understand AI agents, so do check the output for quality. OpenAI models (gpt-3.5-turbo) & llama-index are used. The analysis takes roughly 120 seconds & may not always be consistent. An error occurs when the OpenAI API key is not provided, the ChatGPT API is overloaded, or ChatGPT is unable to correctly decipher & format the output.\n\nFuture directions: 1) Make the task creation more open-ended or longer. 2) Discover multiple learning paths and make ChatGPT introspect on them before finalizing the optimal one. 3) Learn from the answers and change the curriculum. ![visitors](https://visitor-badge.glitch.me/badge?page_id=hra/Curriculum-BabyAGI)"""
    )
    with gr.Row() as row:
        with gr.Column():
            textboxtopic = gr.Textbox(placeholder="Enter Topic for Curriculum...", lines=1, label='Topic')
        with gr.Column():
            textboxopenapi = gr.Textbox(placeholder="Enter OpenAI API Key...", lines=1, label='OpenAI API Key')
    with gr.Row() as row:
        examples = gr.Examples(examples=['Acrylic painting', 'Generative AI', 'latest NLP topic models', 'FIFA mobile game', 'Telemedicine'],
                               inputs=[textboxtopic])
    with gr.Row() as row:
        btn = gr.Button("Generate \nCurriculum")
    with gr.Row() as row:
        table1 = gr.Dataframe(
            #headers=["Item", "Cost"],
            #datatype=["str", "str","str"],
            label="Learning Curriculum",
        )
    btn.click(get_learning_curriculum, inputs=[textboxopenapi, textboxtopic], outputs=[table1])

demo.launch(debug=True)