# Meta AI Query Analysis — a free SEO tool by WordLift (Streamlit app).
import streamlit as st
from meta_ai_api import MetaAI
from urllib.parse import urlparse
import pandas as pd
import plotly.express as px
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
import json
# Initialize Meta AI API
# NOTE(review): constructed at import time, so any session/auth work MetaAI
# does happens on every Streamlit rerun — confirm this is intended.
ai = MetaAI()
# Page config
# NOTE(review): Streamlit requires set_page_config to be the first st.* call
# in the script — keep this block ahead of any other Streamlit usage.
st.set_page_config(
    page_title="Meta AI Query Analysis - a Free SEO Tool by WordLift",
    page_icon="img/fav-ico.png",
    layout="centered",
    initial_sidebar_state="collapsed",
    menu_items={
        'Get Help': 'https://wordlift.io/book-a-demo/',
        'About': "# This is a demo app for Meta AI SEO Optimization"
    }
)
# Sidebar
st.sidebar.image("img/logo-wordlift.png")
def local_css(file_name):
    """Inject a local CSS file into the Streamlit page.

    Args:
        file_name: Path to a CSS file, relative to the working directory.

    Raises:
        OSError: If the file cannot be opened.
    """
    # Explicit encoding: the platform default (e.g. cp1252 on Windows) can
    # garble or reject non-ASCII characters in the stylesheet.
    with open(file_name, encoding="utf-8") as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
# Apply the stylesheet shipped alongside this script.
local_css("style.css")
def fetch_response(query):
    """Send *query* to Meta AI and return the raw response payload."""
    return ai.prompt(message=query)
def display_sources(sources):
    """Render a markdown bullet list of sources as "domain: linked title".

    Args:
        sources: Iterable of dicts expected to carry 'link' and 'title'
            keys, or a falsy value when no sources are available.
    """
    # Guard clause keeps the happy path unindented.
    if not sources:
        st.write("No sources available.")
        return
    for source in sources:
        # Tolerate partially-populated entries instead of raising KeyError.
        link = source.get('link', '')
        title = source.get('title', link)
        # Show only the host part of the URL as the source label.
        domain = urlparse(link).netloc
        st.markdown(f"- **{domain}**: [{title}]({link})", unsafe_allow_html=True)
# ---------------------------------------------------------------------------- #
# Sentiment Analysis Function
# ---------------------------------------------------------------------------- #
# Download the VADER lexicon for sentiment analysis
# NOTE(review): runs on every script execution; nltk skips re-downloading a
# cached lexicon but still checks — consider nltk.download(..., quiet=True).
nltk.download('vader_lexicon')
# Initialize the Sentiment Intensity Analyzer
# Shared, module-level analyzer reused by sentiment_analysis() below.
sid = SentimentIntensityAnalyzer()
def sentiment_analysis(text):
    """Score per-sentence sentiment of *text* with the module-level VADER analyzer.

    Args:
        text: Free-form text; sentences are split naively on '.'.

    Returns:
        pd.DataFrame with columns 'content', the VADER score columns
        ('neg', 'neu', 'pos', 'compound'), 'dominant_sentiment'
        (whichever of neg/neu/pos scored highest) and 'confidence'
        (that highest score).
    """
    # Filter on the *stripped* fragment: the original tested the raw split
    # piece, so whitespace-only fragments survived as empty-string rows.
    sentences = [s.strip() for s in text.split('.') if s.strip()]
    df = pd.DataFrame(sentences, columns=['content'])
    # One dict of polarity scores (neg/neu/pos/compound) per sentence.
    df['sentiment_scores'] = df['content'].apply(sid.polarity_scores)
    # Expand the score dicts into their own columns.
    df = pd.concat(
        [df.drop(columns=['sentiment_scores']),
         df['sentiment_scores'].apply(pd.Series)],
        axis=1,
    )
    # Dominant sentiment is the highest of neg/neu/pos; confidence is its score.
    df['dominant_sentiment'] = df[['neg', 'neu', 'pos']].idxmax(axis=1)
    df['confidence'] = df[['neg', 'neu', 'pos']].max(axis=1)
    return df
# ---------------------------------------------------------------------------- #
# Advanced Analysis
# ---------------------------------------------------------------------------- #
def fetch_advanced_analysis(query, msg):
    """Ask Meta AI for a structured (JSON) analysis of a query/response pair.

    Args:
        query: The original user query.
        msg: The AI's response message to that query.

    Returns:
        The raw response dict from ai.prompt(); its 'message' field is
        expected — but not guaranteed — to contain a JSON object
        (parse_analysis handles the extraction and failure cases).
    """
    # The prompt embeds a worked example of the expected JSON shape so the
    # model answers in a machine-parseable format.
    analysis_prompt = f"""
    Analyze the user's request: '{query}', and the response: '{msg}'.
    Based on this analysis, generate a detailed JSON response including:
    1. The user's intent,
    2. Up to four follow-up questions,
    3. The main entities mentioned in the response.
    Example of expected JSON format:
    {{
        "user_intent": "Identify the effects of climate change on polar bears",
        "follow_up_questions": [
            "What are the primary threats to polar bears today?",
            "How does the melting ice affect their habitat?",
            "What conservation efforts are in place for polar bears?",
            "How can individuals contribute to these efforts?"
        ],
        "entities": {{
            "animal": ["polar bears"],
            "issue": ["climate change"],
            "actions": ["conservation efforts"]
        }}
    }}
    """
    # Assume ai is an initialized MetaAI instance that can send prompts to the AI service
    advanced_response = ai.prompt(message=analysis_prompt)
    return advanced_response
def parse_analysis(analysis_message):
    """Extract and parse the outermost '{...}' JSON object from free text.

    Args:
        analysis_message: Text that should contain a JSON object somewhere
            between its first '{' and last '}'.

    Returns:
        The parsed dict on success; otherwise a dict with an 'error' key
        (plus 'details' when JSON decoding failed).
    """
    start = analysis_message.find('{')
    end = analysis_message.rfind('}')
    # Both braces must exist and be in order. The original compared
    # `rfind('}') + 1` against -1, which is always true, so input with no
    # closing brace fell through to a confusing decode error on "".
    if start == -1 or end == -1 or end < start:
        return {"error": "Valid JSON data not found in the response"}
    json_str = analysis_message[start:end + 1]
    try:
        return json.loads(json_str)
    except json.JSONDecodeError as e:
        return {"error": "Failed to decode JSON", "details": str(e)}
# ---------------------------------------------------------------------------- #
# Main Function
# ---------------------------------------------------------------------------- #
def main():
    """Render the Streamlit UI: query input plus Overview/Analysis/Sentiment tabs."""
    # Header: logo next to the title.
    image_path = 'img/meta-ai-logo.png'  # Replace with your image's filename and extension
    col1, col2 = st.columns([1, 2])  # Adjust the ratio as needed for your layout
    with col1:
        st.image(image_path, width=60)
    with col2:
        st.title("Meta AI SEO Tool")
    # Collapsible box with link to the site
    with st.expander("ℹ️ Important Information", expanded=False):
        st.markdown("""
        - 🚨 **This is an experimental tool**: Functionality might vary, and it may not always work as expected.
        - 📖 **Learn more about our research**: Understand what Meta AI is and why SEO matters by reading our in-depth article. [Read about Meta AI and SEO](https://wordlift.io/blog/en/meta-ai-seo/)""")
    # User input
    user_query = st.text_area("Enter your query:", height=150, key="query_overview")
    submit_button = st.button("Analyze Query", key="submit_overview")

    # Fetch the response once, up front. The original assigned `msg` only
    # inside tab 1's `if submit_button and user_query` branch while tabs 2/3
    # gated on the button state alone — clicking with an empty query raised
    # NameError on `msg`. Gating every tab on `msg is not None` fixes that.
    msg = None
    response = None
    if submit_button and user_query:
        response = fetch_response(user_query)
        msg = response.get('message', 'No response message.')

    tab1, tab2, tab3 = st.tabs(["Overview", "Analysis", "Sentiment"])

    # Tab 1: Overview - the initial response and its sources.
    with tab1:
        if msg is not None:
            st.write(msg)
            with st.expander("Show Sources"):
                display_sources(response.get('sources', []))

    # Tab 2: Analysis - user intent, follow-up questions and entities.
    with tab2:
        if msg is not None:
            advanced_response = fetch_advanced_analysis(user_query, msg)
            advanced_msg = advanced_response.get('message', 'No advanced analysis available.')
            analysis_data = parse_analysis(advanced_msg)
            if "error" not in analysis_data:
                st.write("#### User Intent:", analysis_data['user_intent'])
                st.divider()  # 👈 An horizontal rule
                st.write("### Follow-up Questions:")
                for question in analysis_data['follow_up_questions']:
                    st.write("- " + question)
                st.divider()
                st.write("#### Identified Concepts:")
                for entity_type, entities in analysis_data['entities'].items():
                    st.write(f"**{entity_type.capitalize()}**: {', '.join(entities)}")
                st.divider()

    # Tab 3: Sentiment - per-sentence sentiment of the response.
    with tab3:
        if msg is not None:
            df_sentiment = sentiment_analysis(msg)
            fig = px.scatter(df_sentiment, y='dominant_sentiment', color='dominant_sentiment',
                             size='confidence', hover_data=['content'],
                             color_discrete_map={"neg": "firebrick", "neu": "navajowhite", "pos": "darkgreen"},
                             labels={'dominant_sentiment': 'Sentiment'},
                             title='Sentiment Analysis of the Response')
            fig.update_layout(width=800, height=300)
            st.plotly_chart(fig)
# Script entry point; a stray trailing "|" scrape artifact after main() was
# removed — it made the file a syntax error.
if __name__ == "__main__":
    main()