"""Streamlit app: AWS Well-Architected Review recommendations.

Collects a free-text description of the user's AWS architecture and asks an
NVIDIA-hosted LLM (OpenAI-compatible API) for recommendations framed around
the five Well-Architected pillars. Conversation history is kept in
``st.session_state`` so follow-up submissions include prior context.
"""

import os

import streamlit as st
from openai import OpenAI

# NVIDIA's endpoint speaks the OpenAI wire protocol; the key is read from the
# environment (NVIDIA_API_KEY). If unset this is None and the first API call
# will fail with an authentication error.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.getenv("NVIDIA_API_KEY"),
)

# --- Page layout ---
st.title("AWS Well-Architected Review")
st.write("Get recommendations for optimizing your AWS architecture.")

# Streamlit reruns this script top-to-bottom on every interaction, so the
# system prompt is seeded exactly once per session via session_state.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "system", "content": "You are an assistant that provides recommendations based on AWS Well-Architected Review best practices. Focus on the 5 pillars: Operational Excellence, Security, Reliability, Performance Efficiency, and Cost Optimization."}
    ]

# User input for the AWS architecture description.
architecture_input = st.text_area("Describe your AWS architecture:")

if st.button("Get Recommendations"):
    if architecture_input:
        # Record the user's turn so the model sees the full conversation.
        st.session_state.messages.append(
            {"role": "user", "content": architecture_input}
        )

        with st.chat_message("assistant"):
            with st.spinner("Generating recommendations..."):
                # Streamed completion over the full message history.
                stream = client.chat.completions.create(
                    model="nvidia/llama-3.1-nemotron-70b-instruct",
                    messages=st.session_state.messages,
                    temperature=0.5,
                    top_p=0.7,
                    max_tokens=1024,
                    stream=True,
                )

                # Accumulate streamed deltas; some chunks (e.g. the final
                # one) carry no content, so skip None deltas. The delta is
                # bound to a local to avoid a double attribute lookup.
                response_chunks = []
                for chunk in stream:
                    delta = chunk.choices[0].delta.content
                    if delta is not None:
                        response_chunks.append(delta)
                response = "".join(response_chunks)

            # Display the assembled response as recommendations.
            st.markdown(f"**Recommendations:**\n\n{response}")

        # Persist the assistant's turn for later context.
        st.session_state.messages.append(
            {"role": "assistant", "content": response}
        )
    else:
        # Bug fix: previously an empty submission silently did nothing,
        # leaving the user with no feedback at all.
        st.warning("Please describe your AWS architecture first.")