update example
Files changed:

- app.py +85 -11
- imgs/demo_cases.png +3 -0
- imgs/demo_cases/AI_Pioneers.jpg +3 -0
- imgs/demo_cases/edit.png +3 -0
- imgs/demo_cases/entity.png +3 -0
- imgs/demo_cases/reasoning.png +3 -0
- imgs/demo_cases/same_pose.png +3 -0
- imgs/demo_cases/skeletal.png +3 -0
- imgs/demo_cases/skeletal2img.png +3 -0
- imgs/demo_cases/t2i_woman_with_book.png +3 -0
- imgs/overall.jpg +3 -0
- imgs/test_cases/cat.jpeg +3 -0
- imgs/test_cases/control.jpg +3 -0
- imgs/test_cases/lecun.png +3 -0
- imgs/test_cases/pose.png +3 -0
- imgs/test_cases/two_man.jpg +3 -0
- imgs/test_cases/watch.jpg +3 -0
- imgs/test_cases/woman.png +3 -0
- imgs/test_cases/yifei2.png +3 -0
- imgs/test_cases/young_musk.jpg +3 -0
- imgs/test_cases/young_trump.jpeg +3 -0
app.py
CHANGED
@@ -47,48 +47,122 @@ def generate_image(text, img1, img2, img3, height, width, guidance_scale, inference_steps, seed):
 def get_example():
     case = [
         [
-            "A…
+            "A vintage camera placed on the ground, ejecting a swirling cloud of Polaroid-style photographs into the air. The photos, showing landscapes, wildlife, and travel scenes, seem to defy gravity, floating upward in a vortex of motion. The camera emits a glowing, smoky light from within, enhancing the magical, surreal atmosphere. The dark background contrasts with the illuminated photos and camera, creating a dreamlike, nostalgic scene filled with vibrant colors and dynamic movement. Scattered photos are visible on the ground, further contributing to the idea of an explosion of captured memories.",
             None,
             None,
             None,
             1024,
             1024,
-            …
+            2.5,
             50,
-            …
+            0,
         ],
         [
-            "A woman…
-            "./imgs/test_cases/…
+            "A woman <img><|image_1|></img> in a wedding dress. Next to her is a black-haired man.",
+            "./imgs/test_cases/yifei2.png",
             None,
             None,
             1024,
             1024,
-            …
+            2.5,
             50,
-            …
+            0,
+        ],
+        [
+            "A man in a black shirt is reading a book. The man is the right man in <img><|image_1|></img>.",
+            "./imgs/test_cases/two_man.jpg",
+            None,
+            None,
+            1024,
+            1024,
+            2.5,
+            50,
+            0,
         ],
         [
-            "…
-            "./imgs/test_cases/…
+            "Two men are celebrating with raised glasses in a restaurant. A man is <img><|image_1|></img>. The other man is <img><|image_2|></img>.",
+            "./imgs/test_cases/young_musk.jpg",
             "./imgs/test_cases/img2.jpg",
-            …
+            None,
+            1024,
+            1024,
+            2.5,
+            50,
+            0,
+        ],
+        [
+            "<img><|image_1|><img>\n Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola.",
+            "./imgs/demo_cases/t2i_woman_with_book.png",
+            None,
+            None,
+            1024,
+            1024,
+            2.5,
+            50,
+            222,
+        ],
+        [
+            "Detect the skeleton of human in this image: <img><|image_1|></img>.",
+            "./imgs/test_cases/control.jpg",
+            None,
+            None,
+            1024,
+            1024,
+            2.0,
+            50,
+            0,
+        ],
+        [
+            "Generate a new photo using the following picture and text as conditions: <img><|image_1|><img>\n A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
+            "./imgs/demo_cases/skeletal.png",
+            None,
+            None,
             1024,
             1024,
-            …
+            2,
             50,
             42,
         ],
+        [
+            "Following the pose of this image <img><|image_1|><img>, generate a new photo: A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
+            "./imgs/demo_cases/edit.png",
+            None,
+            None,
+            1024,
+            1024,
+            2.0,
+            50,
+            123,
+        ],
+        [
+            "<img><|image_1|></img> What item can be used to see the current time? Please remove it.",
+            "./imgs/test_cases/watch.jpg",
+            None,
+            None,
+            1024,
+            1024,
+            2.5,
+            50,
+            0,
+        ],
     ]
     return case
 
 def run_for_examples(text, img1, img2, img3, height, width, guidance_scale, inference_steps, seed):
     return generate_image(text, img1, img2, img3, height, width, guidance_scale, inference_steps, seed)
 
+description = """
+OmniGen is a unified image generation model that you can use to perform various tasks, including but not limited to text-to-image generation, subject-driven generation, identity-preserving generation, and image-conditioned generation.
+
+For multi-modal to image generation, you should pass a string as `prompt` and a list of image paths as `input_images`. Placeholders in the prompt should be in the format `<img><|image_*|></img>`.
+For example, use an image of a woman to generate a new image:
+prompt = "A woman holds a bouquet of flowers and faces the camera. The woman is <img><|image_1|></img>."
+"""
 
 # Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# OmniGen: Unified Image Generation [paper](https://arxiv.org/abs/2409.11340) [code](https://github.com/VectorSpaceLab/OmniGen)")
+    gr.Markdown(description)
     with gr.Row():
         with gr.Column():
             # Text input box
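The hunk adds the example rows and the run_for_examples runner, but the wiring between them falls outside this diff. The sketch below shows how the nine-column rows from get_example() would typically be attached with gr.Examples; the component names, labels, and slider ranges are illustrative assumptions, not code from app.py:

import gradio as gr

# Sketch under the assumption that get_example(), run_for_examples(), and
# generate_image() are already defined as in the diff above.
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    with gr.Row():
        img1 = gr.Image(label="Image 1", type="filepath")
        img2 = gr.Image(label="Image 2", type="filepath")
        img3 = gr.Image(label="Image 3", type="filepath")
    height = gr.Slider(256, 2048, value=1024, step=16, label="Height")
    width = gr.Slider(256, 2048, value=1024, step=16, label="Width")
    guidance = gr.Slider(1.0, 5.0, value=2.5, step=0.1, label="Guidance scale")
    steps = gr.Slider(1, 100, value=50, step=1, label="Inference steps")
    seed = gr.Number(value=0, precision=0, label="Seed")
    output = gr.Image(label="Output")

    # Each 9-element row from get_example() maps positionally onto these
    # inputs, matching the run_for_examples(text, img1, img2, img3, height,
    # width, guidance_scale, inference_steps, seed) signature.
    gr.Examples(
        examples=get_example(),
        inputs=[prompt, img1, img2, img3, height, width, guidance, steps, seed],
        outputs=output,
        fn=run_for_examples,
    )

Passing fn=run_for_examples lets Gradio actually run an example when it is clicked rather than only filling in the inputs, which is why run_for_examples mirrors generate_image's parameter list one-to-one.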
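The new description block documents the placeholder convention: each <img><|image_i|></img> token in the prompt is bound to the i-th image input. A minimal illustrative call, using the generate_image() signature from the hunk header and the woman.png test image added in this commit; the positional call below is an assumption about how the app invokes it, not code from the diff:

# Hypothetical smoke test; generate_image() is defined earlier in app.py.
prompt = (
    "A woman holds a bouquet of flowers and faces the camera. "
    "The woman is <img><|image_1|></img>."
)
image = generate_image(
    prompt,                          # text
    "./imgs/test_cases/woman.png",   # img1, substituted for <|image_1|>
    None,                            # img2 (unused)
    None,                            # img3 (unused)
    1024,                            # height
    1024,                            # width
    2.5,                             # guidance_scale
    50,                              # inference_steps
    0,                               # seed
)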
All of the images listed above (imgs/demo_cases.png, imgs/overall.jpg, and the files under imgs/demo_cases/ and imgs/test_cases/) are newly ADDED binary assets tracked with Git LFS.
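For reference, the uniform "+3 -0" next to every image in the summary reflects how Git LFS works: the repository stores only a three-line pointer file per asset, shaped like the stub below (the digest and size shown are placeholders, not the real values for these files):

version https://git-lfs.github.com/spec/v1
oid sha256:<64-hex-character digest of the image contents>
size <file size in bytes>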