ShuoZheLi committed on
Commit
00b19e2
1 Parent(s): f738bd1

new antmaze generate and dataset

antmaze-large-play-v2_easy_dataset.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:497a8a3afc71b90710ee3e93622590798da21977a43a7eaa633605bd880053ce
+ size 43492523
antmaze-large-play-v2_hard_dataset.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74de0389013c8c1e2f3ae6dff24f58bf9899e41bee150a50de4f4010fede8178
+ size 23192290
antmaze-large-play-v2_medium_dataset.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d5519c42a95443d0b808953f17fbda577d0879f89e10eea7837292cba1f79db
+ size 27286784
antmaze-medium-play-v2_easy_dataset.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d92648c6e6cc7e3cb88a627bb4fe4aff11acc686d6f69cccd581b300b8d3ae8d
+ size 35364691
antmaze-medium-play-v2_hard_dataset.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7e8befc64ed8acd747e067229f9a1ce2ecb4e3a70b5d83818f83e5198d4af3d
+ size 26610812
antmaze-medium-play-v2_medium_dataset.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf17c31fe55a9d65df0e6d89b5891269ffe69ae9b4929574224cfaf4575ea055
+ size 30656089
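
Note: the six .npy files above are Git LFS pointers; the files they reference are Python dictionaries written with pickle.dump by antmaze_dataset.py below, not plain NumPy arrays. A minimal loading sketch (not part of the commit), assuming the files have been pulled from LFS and using the key names from the script's return value:

import pickle

# The file is a pickled dict of arrays despite the .npy extension
# (see the pickle.dump call at the end of antmaze_dataset.py).
with open('antmaze-large-play-v2_easy_dataset.npy', 'rb') as f:
    dataset = pickle.load(f)

print(sorted(dataset.keys()))         # 'actions', 'observations', 'rewards', ...
print(dataset['observations'].shape)  # (num_transitions, obs_dim)
print(dataset['returns'][:5])         # normalized return of each transition's episode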
antmaze_dataset.py ADDED
@@ -0,0 +1,196 @@
+ import copy
+ import os
+ import random
+ import uuid
+ from dataclasses import asdict, dataclass
+ from pathlib import Path
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+ import pickle
+ import d4rl  # noqa: F401 -- importing d4rl registers the AntMaze environments with gym
+ import gym
+ import numpy as np
+ import pyrallis
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import wandb
+ from torch.distributions import Normal
+ from torch.optim.lr_scheduler import CosineAnnealingLR
+
+ @dataclass
+ class TrainConfig:
+     #############################
+     ######### Experiment ########
+     #############################
+     env_1: str = "antmaze-medium-play-v2"
+     level: str = "hard"
+
+ def qlearning_dataset(env, dataset=None, terminate_on_end=False, **kwargs):
+     if dataset is None:
+         dataset = env.get_dataset(**kwargs)
+
+     # indices of the first observation of each trajectory (the step after a terminal or timeout)
+     init_obs_index = np.unique(np.concatenate((np.where(dataset['terminals'])[0][:-1] + 1, np.where(dataset['timeouts'])[0][:-1] + 1)))
+     init_obs_ = dataset['observations'][init_obs_index]
+
+     # starting (x, y) position of the trajectory each transition belongs to
+     init_pos = []
+     init_pos_in_current_traj = dataset['observations'][0][:2]
+     for i in range(len(dataset['observations'])):
+         if i in init_obs_index:
+             init_pos_in_current_traj = dataset['observations'][i][:2]
+         init_pos.append(init_pos_in_current_traj)
+     init_pos = np.array(init_pos)
+
+     # subsample: drop a transition with probability DIS * hardness * 10, where DIS is its
+     # distance from the trajectory start normalized by the start-to-goal distance
+     hardness = {'easy': 0.36, 'medium': 0.4, 'hard': 0.45}
+     obs = dataset['observations']
+     length = dataset['observations'].shape[0]
+     POSITIONS = obs[:, :2]
+     GOAL = dataset['infos/goal']
+     MINIMAL_POSITION = init_pos
+     # get maximal Euclidean distance (trajectory start to goal)
+     # MAX_EU_DIS = (GOAL - MINIMAL_POSITION)**2
+     MAX_EU_DIS = np.linalg.norm(GOAL - MINIMAL_POSITION, axis=1)
+     # DIS = ((POSITIONS - MINIMAL_POSITION)**2) / MAX_EU_DIS
+     DIS = np.linalg.norm(POSITIONS - MINIMAL_POSITION, axis=1) / MAX_EU_DIS
+     # note: `config` is the module-level TrainConfig parsed in the __main__ block below
+     save_idx = np.random.random(size=length) > (DIS * hardness[config.level] * 10)
+     small_data = {}
+     for key in dataset.keys():
+         small_data[key] = dataset[key][save_idx]
+     dataset = small_data
+
+     N = dataset['rewards'].shape[0]
+     obs_ = []
+     next_obs_ = []
+     action_ = []
+     reward_ = []
+     done_ = []
+     timeout_ = []
+     task_horizon = []
+
+     # The newer version of the dataset adds an explicit
+     # timeouts field. Keep the old method for backwards compatibility.
+     use_timeouts = False
+     if 'timeouts' in dataset:
+         use_timeouts = True
+
+     episode_step = 0
+     for i in range(N - 1):
+         obs = dataset['observations'][i].astype(np.float32)
+         new_obs = dataset['observations'][i + 1].astype(np.float32)
+         action = dataset['actions'][i].astype(np.float32)
+         reward = dataset['rewards'][i].astype(np.float32)
+         done_bool = bool(dataset['terminals'][i])
+         timeout_bool = bool(dataset['timeouts'][i])
+
+         if use_timeouts:
+             final_timestep = dataset['timeouts'][i]
+         else:
+             final_timestep = (episode_step == env._max_episode_steps - 1)
+         if (not terminate_on_end) and final_timestep:
+             # Skip this transition and don't apply terminals on the last step of an episode
+             episode_step = 0
+             continue
+         if done_bool or final_timestep:
+             episode_step = 0
+
+         obs_.append(obs)
+         next_obs_.append(new_obs)
+         action_.append(action)
+         reward_.append(reward)
+         done_.append(done_bool)
+         timeout_.append(timeout_bool)
+         task_horizon.append(episode_step)
+         episode_step += 1
+
+     # add in return for each episode
+     return_list = [0]
+     length = [0]
+     for i in range(len(done_)):
+         return_list[-1] += reward_[i]
+         length[-1] += 1
+         if done_[i] or timeout_[i]:
+             return_list.append(0)
+             length.append(0)
+
+     count = 0
+     data_return_list = [0] * len(done_)
+     for i in range(len(done_)):
+         data_return_list[i] = return_list[count]
+         if done_[i] or timeout_[i]:
+             count += 1
+
+     data_return_list = env.get_normalized_score(np.array(data_return_list)) * 100.0
+     data_return_list = np.array(data_return_list)
+
+     # group transitions into per-episode arrays
+     epi_obs = []
+     epi_n_obs = []
+     epi_terminals = []
+     epi_rewards = []
+     epi_returns = []
+     epi_actions = []
+     obs = []
+     n_obs = []
+     terminals = []
+     rewards = []
+     actions = []
+     # task_horizon = []
+     task_step = 0
+     for i in range(len(done_)):
+         obs.append(obs_[i])
+         n_obs.append(next_obs_[i])
+         terminals.append(done_[i])
+         rewards.append(reward_[i])
+         actions.append(action_[i])
+         # task_horizon.append(task_step)
+         task_step += 1
+         if done_[i] or timeout_[i]:
+             epi_obs.append(np.array(obs))
+             epi_n_obs.append(np.array(n_obs))
+             epi_terminals.append(np.array(terminals))
+             epi_rewards.append(np.array(rewards))
+             epi_returns.append(data_return_list[i])
+             epi_actions.append(np.array(actions))
+             obs = []
+             n_obs = []
+             terminals = []
+             rewards = []
+             actions = []
+             task_step = 0
+
+     transition_ids = np.arange(len(obs_))
+
+     return {
+         'observations': np.array(obs_),
+         'actions': np.array(action_),
+         'next_observations': np.array(next_obs_),
+         'rewards': np.array(reward_),
+         'terminals': np.array(done_),
+         'timeouts': np.array(timeout_),
+         'init_states': np.array(init_obs_),
+         'transition_ids': transition_ids,
+         'returns': data_return_list,
+         'epi_obs': np.array(epi_obs, dtype=object),
+         'epi_n_obs': np.array(epi_n_obs, dtype=object),
+         'epi_terminals': np.array(epi_terminals, dtype=object),
+         'epi_rewards': np.array(epi_rewards, dtype=object),
+         'epi_returns': np.array(epi_returns, dtype=object),
+         'epi_actions': np.array(epi_actions, dtype=object),
+         'task_horizon': np.array(task_horizon, dtype=object),
+     }
+
+
+ if __name__ == "__main__":
+     config = pyrallis.parse(config_class=TrainConfig)
+
+     env = gym.make(config.env_1)
+     dataset = qlearning_dataset(env)
+
+     # save the dataset as a pickled dict (the .npy extension is kept for convenience)
+     with open(f'./{config.env_1}_{config.level}_dataset.npy', 'wb') as f:
+         pickle.dump(dataset, f)
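
For intuition only, here is a small sketch (not part of the commit) of the keep rule implied by the save_idx line in qlearning_dataset: a transition survives when a uniform draw exceeds DIS * hardness * 10, so its keep probability is max(0, 1 - DIS * hardness * 10), and harder levels discard far-from-start transitions more aggressively.

import numpy as np

# Keep probability of a transition as a function of DIS, its distance from the
# trajectory's start position normalized by the start-to-goal distance.
hardness = {'easy': 0.36, 'medium': 0.4, 'hard': 0.45}
DIS = np.array([0.0, 0.05, 0.1, 0.2, 0.3])
for level, h in hardness.items():
    keep_prob = np.clip(1.0 - DIS * h * 10.0, 0.0, 1.0)
    print(level, np.round(keep_prob, 2))
# For 'hard', anything beyond roughly 0.22 of the start-to-goal distance is always
# dropped, consistent with the hard files above being the smallest.

The script is driven by pyrallis, so it can presumably be run once per dataset as, e.g., python antmaze_dataset.py --env_1 antmaze-large-play-v2 --level easy, assuming pyrallis's default mapping from dataclass fields to command-line flags.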