It runs on a Colab T4. First, install the dependencies:
!pip install git+https://github.com/huggingface/transformers
!pip install einops timm pillow
!pip install sentencepiece bitsandbytes protobuf
!pip install git+https://github.com/huggingface/accelerate
!pip install git+https://github.com/huggingface/diffusers
!git clone https://github.com/deepseek-ai/Janus.git
%cd Janus
!pip install -e .
!pip install flash-attn
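Before loading the model, it can help to confirm that Colab actually gave you a GPU (and note that flash-attn generally targets Ampere-or-newer cards, so it may simply not be used on a T4). A quick, optional check:
import torch
# should print True and something like "Tesla T4"
print(torch.cuda.is_available())
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))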
pic2text
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
# specify the path to the model
model_path = "deepseek-ai/Janus-1.3B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
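The conversation below points at /content/demo.jpeg, which does not exist on a fresh Colab VM. One way to provide it, as a minimal sketch using Colab's upload widget (any image will do):
from google.colab import files
uploaded = files.upload()  # pick an image from your machine
# save the first uploaded file under the path the conversation expects
first_name = next(iter(uploaded))
with open("/content/demo.jpeg", "wb") as f:
    f.write(uploaded[first_name])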
conversation = [
    {
        "role": "User",
        "content": "<image_placeholder>\ndescribe the image.",
        "images": ["/content/demo.jpeg"],
    },
    {"role": "Assistant", "content": ""},
]
# load images and prepare for inputs
pil_images = load_pil_images(conversation)
prepare_inputs = vl_chat_processor(
    conversations=conversation, images=pil_images, force_batchify=True
).to(vl_gpt.device)
# run image encoder to get the image embeddings
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
# run the model to get the response
outputs = vl_gpt.language_model.generate(
    inputs_embeds=inputs_embeds,
    attention_mask=prepare_inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=512,
    do_sample=False,
    use_cache=True,
)
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
print(f"{prepare_inputs['sft_format'][0]}", answer)
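One practical note: the Turing-generation T4 predates native bfloat16 tensor cores, so bfloat16 runs without hardware acceleration there. If you hit dtype errors or very slow inference, switching the model to float16 is worth a try (untested here, and some models are sensitive to the change):
# hedged alternative: run the model in float16 instead of bfloat16 on the T4
vl_gpt = vl_gpt.to(torch.float16).cuda().eval()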
text2pic
import torch
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor
import numpy as np
import os
import PIL.Image
def setup_model(model_path="deepseek-ai/Janus-1.3B"):
    # Initialize processor and handle warnings
    vl_chat_processor = VLChatProcessor.from_pretrained(
        model_path,
        trust_remote_code=True
    )
    # Add image placeholder tag
    vl_chat_processor.image_start_tag = "<image_placeholder>"
    # Load model with proper device placement
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vl_gpt = AutoModelForCausalLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        offload_folder="offload",
        offload_buffers=True
    ).to(device)
    return vl_gpt, vl_chat_processor, device
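A caveat on the loading call above: device_map="auto" hands placement to accelerate, so the trailing .to(device) is redundant at best, and if any weights were offloaded to CPU or disk accelerate will refuse to move the model. An alternative sketch that sidesteps this, since a 1.3B model in bfloat16 should comfortably fit in the T4's 16 GB:
# alternative (a sketch): skip device_map/offload and place the model explicitly
vl_gpt = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to(device).eval()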
@torch.inference_mode()
def generate(
    mmgpt: MultiModalityCausalLM,
    vl_chat_processor: VLChatProcessor,
    prompt: str,
    device: torch.device,
    temperature: float = 1,
    parallel_size: int = 4,
    cfg_weight: float = 5,
    image_token_num_per_image: int = 576,
    img_size: int = 384,
    patch_size: int = 16,
):
    torch.cuda.empty_cache()
    # Ensure input_ids are on the correct device
    input_ids = vl_chat_processor.tokenizer.encode(prompt)
    input_ids = torch.LongTensor(input_ids).to(device)
    tokens = torch.zeros((parallel_size*2, len(input_ids)), dtype=torch.int, device=device)
    for i in range(parallel_size*2):
        tokens[i, :] = input_ids
        if i % 2 != 0:
            tokens[i, 1:-1] = vl_chat_processor.tokenizer.pad_token_id
    # Ensure model and inputs are on same device
    mmgpt = mmgpt.to(device)
    inputs_embeds = mmgpt.language_model.get_input_embeddings()(tokens)
    generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int, device=device)
    past_key_values = None
    for i in range(image_token_num_per_image):
        with torch.cuda.amp.autocast():
            outputs = mmgpt.language_model.model(
                inputs_embeds=inputs_embeds,
                use_cache=True,
                past_key_values=past_key_values
            )
            past_key_values = outputs.past_key_values
            hidden_states = outputs.last_hidden_state
            logits = mmgpt.gen_head(hidden_states[:, -1, :])
            logit_cond = logits[0::2, :]
            logit_uncond = logits[1::2, :]
            logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)
            probs = torch.softmax(logits / temperature, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)
            generated_tokens[:, i] = next_token.squeeze(dim=-1)
            next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)
            img_embeds = mmgpt.prepare_gen_img_embeds(next_token)
            inputs_embeds = img_embeds.unsqueeze(dim=1)
        if i % 100 == 0:
            torch.cuda.empty_cache()
    # Move tensors to CPU for final processing
    dec = mmgpt.gen_vision_model.decode_code(
        generated_tokens.to(dtype=torch.int),
        shape=[parallel_size, 8, img_size//patch_size, img_size//patch_size]
    )
    dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)
    dec = np.clip((dec + 1) / 2 * 255, 0, 255)
    visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)
    visual_img[:, :, :] = dec
    os.makedirs('generated_samples', exist_ok=True)
    for i in range(parallel_size):
        save_path = os.path.join('generated_samples', f"img_{i}.jpg")
        PIL.Image.fromarray(visual_img[i]).save(save_path)
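Why parallel_size*2 rows: for each image the batch interleaves a conditional row (the full prompt) with an unconditional row (the prompt with everything but the first and last token replaced by pad), and the two logit streams are mixed with classifier-free guidance, logits = uncond + cfg_weight * (cond - uncond). A tiny standalone sketch of that mixing step on dummy tensors:
import torch
# toy shapes: 4 images -> 8 interleaved rows, vocab of 16 image tokens (made-up numbers)
logits = torch.randn(8, 16)
cfg_weight = 5.0
logit_cond = logits[0::2, :]    # rows 0, 2, 4, 6: conditioned on the prompt
logit_uncond = logits[1::2, :]  # rows 1, 3, 5, 7: unconditional
guided = logit_uncond + cfg_weight * (logit_cond - logit_uncond)
probs = torch.softmax(guided / 1.0, dim=-1)            # temperature = 1
next_token = torch.multinomial(probs, num_samples=1)   # one image token per image
print(next_token.shape)  # torch.Size([4, 1])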
def main():
    # Set PyTorch memory management
    os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512'
    # Setup model and get device
    model_path = "deepseek-ai/Janus-1.3B"
    vl_gpt, vl_chat_processor, device = setup_model(model_path)
    conversation = [
        {
            "role": "User",
            "content": "A close-up high-contrast photo of Sydney Opera House sitting next to Eiffel tower, under a blue night sky of roiling energy, exploding yellow stars, and radiating swirls of blue.",
        },
        {"role": "Assistant", "content": ""},
    ]
    try:
        sft_format = vl_chat_processor.apply_sft_template_for_multi_turn_prompts(
            conversations=conversation,
            sft_format=vl_chat_processor.sft_format,
            system_prompt="",
        )
        prompt = sft_format + vl_chat_processor.image_start_tag
        generate(vl_gpt, vl_chat_processor, prompt, device)
    except Exception as e:
        print(f"Error during generation: {str(e)}")
        torch.cuda.empty_cache()

if __name__ == "__main__":
    main()
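Once main() finishes, the samples are written to generated_samples/ as img_0.jpg through img_3.jpg (with the default parallel_size of 4). To preview them inline in the Colab notebook, a minimal sketch:
from IPython.display import Image, display
# show each generated sample inline in the notebook output
for i in range(4):
    display(Image(filename=f"generated_samples/img_{i}.jpg"))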
To generate a different scene, just change the User prompt in the conversation (for example, "generate image for teen boy.") and rerun main(); the rest of the script stays the same.