Delete value_guided_diffuser.py
value_guided_diffuser.py  (+0 -88)  DELETED
@@ -1,88 +0,0 @@
import torch
import tqdm

from diffusers import DDPMScheduler, DiffusionPipeline
from diffusers.models.unet_1d import UNet1DModel


class ValueGuidedDiffuserPipeline(DiffusionPipeline):
    """Samples state-action trajectories with a diffusion model, steering each
    denoising step along the gradient of a learned value function."""

    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        # dataset statistics used to (de-)normalize observations and actions
        self.data = env.get_dataset()
        self.means = {key: val.mean(axis=0) for key, val in self.data.items()}
        self.stds = {key: val.std(axis=0) for key, val in self.data.items()}
        self.device = self.unet.device
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if isinstance(x_in, dict):
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.device)
        return torch.tensor(x_in, device=self.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the observation part of each conditioned timestep
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create a batch of timesteps to pass into the model
            timesteps = torch.full((batch_size,), i, device=self.device, dtype=torch.long)
            # nudge the trajectory along the value-function gradient
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    y = self.value_function(x, timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                # skip guidance on the final low-noise steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            # denoise one step with the trajectory UNet (channels-first layout)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=20, n_guide_steps=2, scale=0.1):
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        x1 = torch.randn(shape, device=self.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # rank the sampled plans by predicted value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # execute only the first action of the highest-value plan
        denorm_actions = denorm_actions[0, 0]
        return denorm_actions
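
For reference, a minimal sketch of how this pipeline would be driven (not part of the deleted file): it assumes a D4RL-style environment that exposes get_dataset(), plus pretrained trajectory and value UNets. The environment name and the checkpoint ids below are hypothetical placeholders, not real repositories.

import d4rl  # noqa: F401 -- registers the offline-RL environments with gym
import gym
from diffusers import DDPMScheduler, UNet1DModel

env = gym.make("hopper-medium-v2")  # placeholder D4RL env exposing get_dataset()

# Placeholder checkpoint ids -- substitute actual trained models.
unet = UNet1DModel.from_pretrained("your-org/hopper-trajectory-unet")
value_function = UNet1DModel.from_pretrained("your-org/hopper-value-function")
scheduler = DDPMScheduler(num_train_timesteps=100)

pipeline = ValueGuidedDiffuserPipeline(value_function, unet, scheduler, env)

obs = env.reset()
for _ in range(100):
    # plan a batch of trajectories, execute only the first action of the best one
    action = pipeline(obs, planning_horizon=20, n_guide_steps=2, scale=0.1)
    obs, reward, done, _ = env.step(action)
    if done:
        break

Replanning from scratch at every step and keeping only the first action is the usual receding-horizon pattern for diffusion planners; the batch of sampled trajectories exists solely so the value ranking has candidates to choose from.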