m7mdal7aj committed
Commit 2a9f27a
1 Parent(s): eedbfb7

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -8,7 +8,7 @@ from PIL import Image
 from transformers import Blip2Processor, Blip2ForConditionalGeneration, InstructBlipProcessor, InstructBlipForConditionalGeneration
 
 
-def load_caption_model(blip2=False, instructblip=True):
+def load_caption_model(blip2=True, instructblip=False):
 
     if blip2:
         processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True,torch_dtype=torch.float16, device_map="cuda")
@@ -32,7 +32,7 @@ def answer_question(image, question, model, processor):
 
     inputs = processor(image, question, return_tensors="pt").to("cuda", torch.float16)
 
-    out = model.generate(**inputs, max_length=200, min_length=20).to("cuda", torch.float16)
+    out = model.generate(**inputs, max_length=200, min_length=20)
 
     answer = processor.decode(out[0], skip_special_tokens=True).strip()
     return answer
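
The second hunk is the substantive fix: model.generate() returns a LongTensor of integer token ids, so the removed .to("cuda", torch.float16) cast would have converted those ids to half-precision floats before processor.decode(), corrupting the decoded answer. (The first hunk just flips the default backend from InstructBLIP to BLIP-2.) Below is a minimal sketch of the corrected flow, not part of the commit itself: the checkpoint and generation arguments are taken from the diff, while the image path and question are hypothetical, and a CUDA device with bitsandbytes installed is assumed for load_in_8bit=True.

import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

# Same checkpoint and loading arguments as in the diff (requires a CUDA GPU
# and bitsandbytes for 8-bit loading).
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b",
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="cuda",
)

image = Image.open("example.jpg")          # hypothetical input image
question = "What is shown in this image?"  # hypothetical question

# Pixel values are floating point, so casting the *inputs* to float16 is fine.
inputs = processor(image, question, return_tensors="pt").to("cuda", torch.float16)

# generate() returns integer token ids; leave them uncast.
out = model.generate(**inputs, max_length=200, min_length=20)
print(out.dtype)  # torch.int64

# The cast removed by this commit would have mangled the ids before decoding:
# out = out.to("cuda", torch.float16)  # bug fixed by this commit

answer = processor.decode(out[0], skip_special_tokens=True).strip()
print(answer)

If the cast is reintroduced, the float16 values no longer round-trip to valid vocabulary indices, so processor.decode() either raises or returns garbage, which is presumably what motivated this change.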