Update README.md
README.md CHANGED

@@ -57,86 +57,30 @@ tags:
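In short: the hand-rolled inference walkthrough (explicit model loading, visual preprocessing, conversation templating, and generation) is replaced by the new high-level `model_init` / `mm_infer` API, shrinking the example from 86 lines to 30.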
## 🤖 Inference with VideoLLaMA2

Before (removed): the example drove every stage of the pipeline by hand. A few lines were truncated in the diff view (the two submodule imports and the model-loading calls); they are reconstructed below from the symbols the snippet actually uses.

```python
import torch
import transformers

import sys
sys.path.append('./')
from videollama2.conversation import conv_templates, SeparatorStyle
from videollama2.constants import DEFAULT_MMODAL_TOKEN, MMODAL_TOKEN_INDEX
from videollama2.mm_utils import get_model_name_from_path, tokenizer_MMODAL_token, KeywordsStoppingCriteria, process_video, process_image
from videollama2.model.builder import load_pretrained_model


def inference():
    # Video Inference
    paths = ['assets/cat_and_chicken.mp4']
    questions = ['What animals are in the video, what are they doing, and how does the video feel?']
    # Reply:
    # The video features a kitten and a baby chick playing together. The kitten is seen laying on the floor while the baby chick hops around. The two animals interact playfully with each other, and the video has a cute and heartwarming feel to it.
    modal_list = ['video']

    # Image Inference
    paths = ['assets/sora.png']
    questions = ['What is the woman wearing, what is she doing, and how does the image feel?']
    modal_list = ['image']

    # 1. Initialize the model.
    model_path = 'DAMO-NLP-SG/VideoLLaMA2-7B-16F-Base'
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, processor, context_len = load_pretrained_model(model_path, None, model_name)
    model = model.to('cuda:0')
    conv_mode = 'llama_2'

    # 2. Visual preprocess (load & transform image or video).
    if modal_list[0] == 'video':
        tensor = process_video(paths[0], processor, model.config.image_aspect_ratio).to(dtype=torch.float16, device='cuda', non_blocking=True)
        default_mm_token = DEFAULT_MMODAL_TOKEN["VIDEO"]
        modal_token_index = MMODAL_TOKEN_INDEX["VIDEO"]
    else:
        tensor = process_image(paths[0], processor, model.config.image_aspect_ratio)[0].to(dtype=torch.float16, device='cuda', non_blocking=True)
        default_mm_token = DEFAULT_MMODAL_TOKEN["IMAGE"]
        modal_token_index = MMODAL_TOKEN_INDEX["IMAGE"]
    tensor = [tensor]

    # 3. Text preprocess (tag process & generate prompt).
    question = default_mm_token + "\n" + questions[0]
    conv = conv_templates[conv_mode].copy()
    conv.append_message(conv.roles[0], question)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()
    input_ids = tokenizer_MMODAL_token(prompt, tokenizer, modal_token_index, return_tensors='pt').unsqueeze(0).to('cuda:0')

    # 4. Generate a response according to visual signals and prompts.
    stop_str = conv.sep if conv.sep_style in [SeparatorStyle.SINGLE] else conv.sep2
    # keywords = ["<s>", "</s>"]
    keywords = [stop_str]
    stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images_or_videos=tensor,
            modal_list=modal_list,
            do_sample=True,
            temperature=0.2,
            max_new_tokens=1024,
            use_cache=True,
            stopping_criteria=[stopping_criteria],
        )

    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    print(outputs[0])


if __name__ == "__main__":
    inference()
```
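The rewrite collapses the four manual steps into two calls: `model_init(model_path)` loads the checkpoint and returns the model together with a modality-keyed processor and the tokenizer, while `mm_infer` takes the preprocessed input plus an instruction and handles prompt construction, generation, and decoding internally. `disable_torch_init` appears to be a small utility that skips redundant default weight initialization to speed up loading (an assumption based on the identically named helper in LLaVA-style codebases).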
After (added): the same flow through the new high-level helpers.

```python
import sys
sys.path.append('./')
from videollama2 import model_init, mm_infer
from videollama2.utils import disable_torch_init


def inference():
    disable_torch_init()

    # Video Inference
    modal = 'video'
    modal_path = 'assets/cat_and_chicken.mp4'
    instruct = 'What animals are in the video, what are they doing, and how does the video feel?'

    # Image Inference
    # (note: these assignments overwrite the video settings above; keep only the block you want to run)
    modal = 'image'
    modal_path = 'assets/sora.png'
    instruct = 'What is the woman wearing, what is she doing, and how does the image feel?'

    model_path = 'DAMO-NLP-SG/VideoLLaMA2-7B-16F-Base'
    model, processor, tokenizer = model_init(model_path)
    output = mm_infer(processor[modal](modal_path), instruct, model=model, tokenizer=tokenizer, do_sample=False, modal=modal)

    print(output)


if __name__ == "__main__":
    inference()
```
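Note that, as written, the image assignments shadow the video ones, so running the script verbatim performs image inference only. Since `model_init` is the expensive step, one natural pattern is to load once and loop over inputs. A minimal sketch using the same API as above (`run_all` and the `cases` list are illustrative names, not part of the README):

```python
import sys
sys.path.append('./')
from videollama2 import model_init, mm_infer
from videollama2.utils import disable_torch_init


def run_all():
    disable_torch_init()
    # Load the checkpoint once; model_init returns the model, a
    # modality-keyed processor dict, and the tokenizer.
    model, processor, tokenizer = model_init('DAMO-NLP-SG/VideoLLaMA2-7B-16F-Base')

    # (modal, path, instruction) triples, reusing the README's two examples.
    cases = [
        ('video', 'assets/cat_and_chicken.mp4',
         'What animals are in the video, what are they doing, and how does the video feel?'),
        ('image', 'assets/sora.png',
         'What is the woman wearing, what is she doing, and how does the image feel?'),
    ]
    for modal, path, instruct in cases:
        # processor[modal] loads and transforms the input for that modality.
        output = mm_infer(processor[modal](path), instruct,
                          model=model, tokenizer=tokenizer, do_sample=False, modal=modal)
        print(f'[{modal}] {output}')


if __name__ == '__main__':
    run_all()
```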