multimodalart HF staff committed on
Commit
791e825
1 Parent(s): 1e07928

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -10,13 +10,13 @@ dtype = torch.bfloat16
10
  device = "cuda"
11
 
12
  bfl_repo = "black-forest-labs/FLUX.1-dev"
13
- scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler", revision="refs/pr/1")
14
  text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
15
  tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
16
- text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype, revision="refs/pr/1")
17
- tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype, revision="refs/pr/1")
18
- vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype, revision="refs/pr/1")
19
- transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, torch_dtype=dtype, revision="refs/pr/1")
20
 
21
  device = "cuda" if torch.cuda.is_available() else "cpu"
22
 
 
10
  device = "cuda"
11
 
12
  bfl_repo = "black-forest-labs/FLUX.1-dev"
13
+ scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler", revision="refs/pr/3")
14
  text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
15
  tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
16
+ text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype, revision="refs/pr/3")
17
+ tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype, revision="refs/pr/3")
18
+ vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype, revision="refs/pr/3")
19
+ transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, torch_dtype=dtype, revision="refs/pr/3")
20
 
21
  device = "cuda" if torch.cuda.is_available() else "cpu"
22