Update app.py
app.py CHANGED
@@ -5,7 +5,6 @@ import base64
 from io import BytesIO
 from PIL import Image
 import numpy as np
-import json
 
 # Initialize the Together client
 api_key = os.environ.get('TOGETHER_API_KEY')
@@ -25,31 +24,41 @@ def generate_gradio_app(image):
     image.save(buffered, format="PNG")
     img_str = base64.b64encode(buffered.getvalue()).decode()
 
-    # Prepare the
-
-    Analyze this wireframe image and suggest a simple Gradio app layout based on it. Describe the main elements you see and how they could be implemented using Gradio components."""
-
-    # Prepare the API request payload
-    payload = {
-        "model": "meta-llama/Llama-Vision-Free",
-        "messages": [{"role": "user", "content": message_content}],
-        "max_tokens": 512,
-        "temperature": 0.7,
-        "top_p": 0.7,
-        "top_k": 50,
-        "repetition_penalty": 1,
-        "stop": ["<|eot_id|>", "<|eom_id|>"],
-        "stream_tokens": True
-    }
+    # Prepare the prompt
+    prompt = """You are a UX/UI designer. Describe the attached screenshot or UI mockup in detail. I will feed in the output you give me to a coding model that will attempt to recreate this mockup as a Gradio app, so please think step by step and describe the UI in detail. Pay close attention to background color, text color, font size, font family, padding, margin, border, etc. Match the colors and sizes exactly. Make sure to mention every part of the screenshot including any headers, footers, etc. Use the exact text from the screenshot. After describing the UI, suggest how this could be implemented using Gradio components."""
 
     # Make the API call
-
+    stream = client.chat.completions.create(
+        model="meta-llama/Llama-Vision-Free",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/png;base64,{img_str}",
+                        },
+                    },
+                ],
+            }
+        ],
+        max_tokens=2048,
+        temperature=0.7,
+        top_p=0.7,
+        top_k=50,
+        repetition_penalty=1,
+        stop=["<|eot_id|>", "<|eom_id|>"],
+        stream=True
+    )
 
     # Collect the streamed response
     generated_text = ""
-    for chunk in
-    if chunk.choices
+    for chunk in stream:
+        if chunk.choices[0].delta.content is not None:
             generated_text += chunk.choices[0].delta.content
+            yield generated_text  # Yield partial results for gradio to update in real-time
 
     return generated_text
@@ -67,7 +76,7 @@ with gr.Blocks() as demo:
             generate_button = gr.Button("Analyze and Suggest", variant="primary")
 
         with gr.Column(scale=2):
-            text_output = gr.Textbox(label="Analysis and Suggestions", lines=
+            text_output = gr.Textbox(label="Analysis and Suggestions", lines=20)
 
     generate_button.click(
         fn=generate_gradio_app,
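In short, the updated generate_gradio_app encodes the uploaded image as a base64 data URL, sends it together with the prompt to Together's meta-llama/Llama-Vision-Free model with stream=True, and yields the running text as chunks arrive. The following is a condensed sketch of that pattern, not the exact app.py code: the function name describe_mockup and the shortened prompt are illustrative, while the client setup mirrors what app.py already does.

import base64
import os
from io import BytesIO

from PIL import Image
from together import Together

client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))

def describe_mockup(image: Image.Image):
    # Encode the PIL image as a base64 data URL, as app.py does
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode()

    # Stream a multimodal chat completion (text prompt + inline image)
    stream = client.chat.completions.create(
        model="meta-llama/Llama-Vision-Free",
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this UI mockup in detail."},  # shortened illustrative prompt
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{img_str}"}},
            ],
        }],
        max_tokens=2048,
        stream=True,
    )

    # Accumulate chunks and yield the running text so the UI can update live
    generated_text = ""
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            generated_text += chunk.choices[0].delta.content
            yield generated_text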
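Because generate_gradio_app now yields partial results instead of only returning once at the end, the existing generate_button.click wiring streams each yielded string into text_output. A rough sketch of the surrounding Blocks layout is below; it reuses the generate_gradio_app generator defined in app.py, and any component not shown in the diff (such as image_input and the Row/Column nesting) is an assumption.

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Assumed input component; the diff only shows the button and the output box
            image_input = gr.Image(type="pil", label="Upload a wireframe")
            generate_button = gr.Button("Analyze and Suggest", variant="primary")
        with gr.Column(scale=2):
            text_output = gr.Textbox(label="Analysis and Suggestions", lines=20)

    # generate_gradio_app is a generator, so Gradio writes each yielded
    # partial string into text_output as it arrives
    generate_button.click(
        fn=generate_gradio_app,
        inputs=image_input,
        outputs=text_output,
    )

demo.launch()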