Commit 3a27073
Caroline Mai Chan committed
Parent(s): 764782e

add title, description, forward pass

Files changed:
- app.py +8 -8
- requirements.txt +2 -1
app.py CHANGED
@@ -87,17 +87,17 @@ model = Generator(3, 1, 3)
 model.load_state_dict(torch.load('model.pth', map_location=torch.device('cpu')))
 model.eval()
 
-transforms_r = [transforms.Resize(int(opt.size), Image.BICUBIC),
-                transforms.ToTensor()]
-transforms.Compose([transforms.Resize(int(opt.size), Image.BICUBIC), transforms.ToTensor()])
-
 def predict(input_img):
     input_img = Image.open(input_img)
-
-    input_img =
-
+    transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC), transforms.ToTensor()])
+    input_img = transform(input_img)
+    input_img = torch.unsqueeze(input_img, 0)
 
-
+    with torch.no_grad():
+        drawing = model(input_img)[0].detach()
+
+    drawing = transforms.ToPILImage()(drawing)
+    return drawing
 
 title="informative-drawings"
 description="Gradio Demo for line drawing generation. "
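The hunk ends before showing how predict, title, and description feed the demo itself. A minimal sketch of the usual wiring, assuming the current gr.Interface API and that the input arrives as a file path (which Image.open requires); none of this appears in the commit:

# Hypothetical continuation of app.py, not part of this commit: wires
# predict(), title, and description into a Gradio Interface.
import gradio as gr

demo = gr.Interface(
    fn=predict,                        # the forward pass added in this commit
    inputs=gr.Image(type="filepath"),  # predict() calls Image.open(), so it expects a path
    outputs="image",                   # predict() returns a PIL image
    title=title,                       # "informative-drawings", defined above
    description=description,           # defined above
)
demo.launch()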
requirements.txt CHANGED
@@ -1 +1,2 @@
-torch
+torch
+torchvision
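torchvision is the dependency behind transforms.Compose, Resize, ToTensor, and ToPILImage in the new forward pass. A standalone sanity check of that preprocessing round trip (a hypothetical snippet, not part of the commit; the dummy image stands in for a user upload):

# Mirrors the preprocessing in predict(): resize the short edge to 256,
# convert to a tensor, add a batch dimension, then map a tensor back to PIL.
from PIL import Image
import torch
from torchvision import transforms

transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC),
                                transforms.ToTensor()])
img = Image.new('RGB', (512, 512))        # dummy stand-in for an uploaded image
x = torch.unsqueeze(transform(img), 0)    # shape: (1, 3, 256, 256)
print(x.shape)
pil_out = transforms.ToPILImage()(x[0])   # inverse direction, as in predict()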