Spaces:
BAAI
/
Running on L40S

ryanzhangfan committed on
Commit
57afb6c
1 Parent(s): a345b1c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -38,6 +38,7 @@ VQ_HUB = "BAAI/Emu3-VisionTokenizer"
38
 
39
  # Prepare models and processors
40
  # Emu3-Gen model and processor
 
41
  gen_model = AutoModelForCausalLM.from_pretrained(
42
  EMU_GEN_HUB,
43
  device_map="cpu",
@@ -45,6 +46,7 @@ gen_model = AutoModelForCausalLM.from_pretrained(
45
  attn_implementation="flash_attention_2",
46
  trust_remote_code=True,
47
  )
 
48
 
49
  # Emu3-Chat model and processor
50
  chat_model = AutoModelForCausalLM.from_pretrained(
@@ -67,10 +69,11 @@ processor = Emu3Processor(
67
  )
68
 
69
  print(device)
70
- gen_model.to(device)
71
  chat_model.to(device)
72
  image_tokenizer.to(device)
73
 
 
74
  @spaces.GPU(duration=120)
75
  def generate_image(prompt):
76
  POSITIVE_PROMPT = " masterpiece, film grained, best quality."
@@ -130,6 +133,7 @@ def generate_image(prompt):
130
  if isinstance(im, Image.Image):
131
  return im
132
  return None
 
133
 
134
  def vision_language_understanding(image, text):
135
  inputs = processor(
 
38
 
39
  # Prepare models and processors
40
  # Emu3-Gen model and processor
41
+ """
42
  gen_model = AutoModelForCausalLM.from_pretrained(
43
  EMU_GEN_HUB,
44
  device_map="cpu",
 
46
  attn_implementation="flash_attention_2",
47
  trust_remote_code=True,
48
  )
49
+ """
50
 
51
  # Emu3-Chat model and processor
52
  chat_model = AutoModelForCausalLM.from_pretrained(
 
69
  )
70
 
71
  print(device)
72
+ # gen_model.to(device)
73
  chat_model.to(device)
74
  image_tokenizer.to(device)
75
 
76
+ """
77
  @spaces.GPU(duration=120)
78
  def generate_image(prompt):
79
  POSITIVE_PROMPT = " masterpiece, film grained, best quality."
 
133
  if isinstance(im, Image.Image):
134
  return im
135
  return None
136
+ """
137
 
138
  def vision_language_understanding(image, text):
139
  inputs = processor(