Update README.md to match chat-v1.1 model
Browse files
README.md
CHANGED
@@ -58,7 +58,7 @@ with torch.no_grad():
|
|
58 |
# vqa example
|
59 |
|
60 |
query = 'How many houses are there in this cartoon?'
|
61 |
-
image = Image.open(requests.get('https://github.com/THUDM/CogVLM/blob/main/examples/3.jpg?raw=true', stream=True).raw).convert('RGB')
|
62 |
inputs = model.build_conversation_input_ids(tokenizer, query=query, history=[], images=[image], template_version='vqa') # vqa mode
|
63 |
inputs = {
|
64 |
'input_ids': inputs['input_ids'].unsqueeze(0).to('cuda'),
|
@@ -73,7 +73,7 @@ with torch.no_grad():
|
|
73 |
outputs = outputs[:, inputs['input_ids'].shape[1]:]
|
74 |
print(tokenizer.decode(outputs[0]))
|
75 |
|
76 |
-
# 4
|
77 |
```
|
78 |
|
79 |
# 方法(Method)
|
|
|
58 |
# vqa example
|
59 |
|
60 |
query = 'How many houses are there in this cartoon?'
|
61 |
+
image = Image.open(requests.get('https://github.com/THUDM/CogVLM/blob/main/examples/3.jpg?raw=true', stream=True).raw).convert('RGB')
|
62 |
inputs = model.build_conversation_input_ids(tokenizer, query=query, history=[], images=[image], template_version='vqa') # vqa mode
|
63 |
inputs = {
|
64 |
'input_ids': inputs['input_ids'].unsqueeze(0).to('cuda'),
|
|
|
73 |
outputs = outputs[:, inputs['input_ids'].shape[1]:]
|
74 |
print(tokenizer.decode(outputs[0]))
|
75 |
|
76 |
+
# 4</s>
|
77 |
```
|
78 |
|
79 |
# 方法(Method)
|