liuhaotian committed
Commit 1c7aced • 1 parent: c99e9ff
Fix
- app.py +13 -0
- examples/extreme_ironing.jpg +0 -0
- examples/waterview.jpg +0 -0
app.py
CHANGED
@@ -58,6 +58,19 @@ if __name__ == "__main__":
     gws.args = parser.parse_args()
     gws.models = []
 
+    gws.title_markdown += """
+
+ONLY WORKS WITH GPU! By default, we load the model with 4-bit quantization to make it fit in smaller hardwares. Set the environment variable `bits` to control the quantization.
+
+Set the environment variable `model` to change the model, and switch hardware accordingly:
+| Model | Hardware |
+|-------|-------------------|
+| liuhaotian/llava-v1.6-mistral-7b | T4-medium |
+| liuhaotian/llava-v1.6-vicuna-7b | T4-medium |
+| liuhaotian/llava-v1.6-vicuna-13b | T4-medium |
+| liuhaotian/llava-v1.6-34b | 2xA10G large |
+"""
+
     print(f"args: {gws.args}")
 
     model_path = os.getenv("model", "liuhaotian/llava-v1.6-mistral-7b")
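For reference, the notice added above configures the demo through the `model` and `bits` environment variables. A minimal sketch of how those variables might be consumed: the `model` default matches the line visible in this diff, while the `bits` handling is an assumption based on the notice and is not shown in the commit.

    import os

    # Model repo to load; the default below is the one visible in this diff.
    model_path = os.getenv("model", "liuhaotian/llava-v1.6-mistral-7b")

    # Quantization width; the 4-bit default is an assumption taken from the
    # added notice ("By default, we load the model with 4-bit quantization").
    bits = int(os.getenv("bits", "4"))

    print(f"Loading {model_path} with {bits}-bit weights")

Launching with a different checkpoint would then look something like `model=liuhaotian/llava-v1.6-vicuna-13b bits=8 python app.py` (shell syntax; the Space hardware should be switched per the table above).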
examples/extreme_ironing.jpg
ADDED
examples/waterview.jpg
ADDED