import json
import os
from datetime import datetime

import gradio as gr
import openai
import pandas as pd
from duckduckgo_search import ddg
from langchain.llms import OpenAIChat
from llama_index import Document, GPTSimpleVectorIndex, LLMPredictor, ServiceContext
def get_learning_curriculum(openapikey, topic):
    dateforfilesave = datetime.today().strftime("%d-%m-%Y %I:%M%p")
    print(topic)
    print(dateforfilesave)

    if openapikey == '':
        return pd.DataFrame(["Please provide an OpenAI API key"], columns=['ERROR'])

    os.environ['OPENAI_API_KEY'] = str(openapikey)

    prompt = 'You are a training center AI. Give me a detailed curriculum to learn about "{topicforquery}" using search. The curriculum will be a series of learning tasks to be achieved. Give output as a python list of jsons with "task name" and a "search keyword" to search to complete the task. Do not repeat the tasks. For each task name also add a list of "questions" to ask of the search results data to select specific articles and complete the curriculum. Remember the search results will be a dataframe of titles & bodies of the searched articles and you may not be able to go through the full article, hence these questions should be of types "Which article best suits a learning curriculum?", "Which article is learning oriented?". To reiterate, output should be in json with keys task name ex: get beginner training articles for painting, search keyword ex: beginner painting & questions ex: What are top articles for painting?'.format(topicforquery=topic)

    openai.api_key = os.getenv("OPENAI_API_KEY")
    resp = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": prompt}
        ]
    )
    tasklist = json.loads(resp['choices'][0]['message']['content'])
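    # Expected shape of tasklist (illustrative sketch built from the prompt's
    # own examples; the model's output is not guaranteed to parse cleanly):
    # [
    #   {"task name": "get beginner training articles for painting",
    #    "search keyword": "beginner painting",
    #    "questions": ["What are top articles for painting?"]},
    #   ...
    # ]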
    def research_search(search_keyword, question_to_ask, topic):
        llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo"))
        service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

        # Normalize the keyword before joining terms with '+'; doing the
        # ' and ' -> ' AND ' replacement after the join could never match.
        keyword = search_keyword.lower()
        keyword = keyword.replace(' and ', ' AND ')
        keyword = "+".join(keyword.split())

        # ddg returns a list of result dicts with 'title', 'href' and 'body' keys
        posts = ddg(keyword + ' ' + topic, safesearch='Off', page=1)
        latestnews_df = pd.DataFrame(posts)
        print(latestnews_df.columns)

        latestnews_df['text'] = 'Title: ' + latestnews_df['title'] + ' Description: ' + latestnews_df['body']
        print(latestnews_df['text'].tolist())

        # Index the search snippets and answer the curriculum question over them
        documents = [Document(t) for t in latestnews_df['text'].tolist()]
        index = GPTSimpleVectorIndex.from_documents(documents)
        respstr = str(index.query(question_to_ask,
                                  service_context=service_context,
                                  response_mode="tree_summarize",
                                  similarity_top_k=10))
        print("Search response: ", respstr)
        return respstr
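    # Example call (hypothetical values, mirroring the prompt's own examples):
    #   research_search("beginner painting",
    #                   "Which article best suits a learning curriculum?",
    #                   "painting")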
    finallist = []
    list1 = []
    list2 = []
    list3 = []
    for i in range(len(tasklist)):
        taskstuff = tasklist[i]
        search_keyword = taskstuff['search keyword']
        print('Task Name: ' + taskstuff['task name'])
        finallist.append('Task Name: ' + taskstuff['task name'])
        for question in taskstuff['questions']:
            response_string = research_search(search_keyword, question, topic)
            finallist.append(" Question: " + question)
            finallist.append(" " + response_string)
            list1.append(taskstuff['task name'])
            list2.append(question)
            list3.append(response_string)

    outputdf = pd.DataFrame()
    outputdf['Task'] = list1
    outputdf['Question'] = list2
    outputdf['Learning'] = list3
    return outputdf
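
# Example standalone use (hypothetical key/topic; the Gradio UI below wires
# this same function to its textboxes):
#   curriculum_df = get_learning_curriculum("sk-...", "watercolor painting")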
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>BabyAGI creates Learning Curriculum</center></h1>")
    gr.Markdown(
        """Enter a topic and get a learning curriculum: a series of learning tasks, each with search-driven questions and answers.\n\nThis is a demo & showcases ChatGPT integrated with real data. It shows how to get real-time data and marry it with ChatGPT capabilities. This demonstrates 'Chain of Thought' thinking using ChatGPT.\n\nNote: llama-index & gpt-3.5-turbo are used. The analysis takes roughly 120 secs & may not always be consistent. If the ChatGPT API is overloaded you will get an error\n ![visitors](https://visitor-badge.glitch.me/badge?page_id=hra.chatgpt-stock-news-snapshots)"""
    )
    with gr.Row() as row:
        with gr.Column():
            textboxtopic = gr.Textbox(placeholder="Enter Topic for Curriculum...", lines=1, label='Topic')
        with gr.Column():
            textboxopenapi = gr.Textbox(placeholder="Enter OpenAI API Key...", lines=1, label='OpenAI API Key')
    with gr.Row() as row:
        btn = gr.Button("Generate \nCurriculum")
    with gr.Row() as row:
        table1 = gr.Dataframe(
            label="Learning Curriculum",
        )
    btn.click(get_learning_curriculum, inputs=[textboxopenapi, textboxtopic], outputs=[table1])

demo.launch(debug=True)