# run_erl.py (forked from ShawK91/Evolutionary-Reinforcement-Learning)
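"""Evolutionary Reinforcement Learning (ERL) training script.

Runs a population of actor networks evolved with SSNE (selection, crossover,
and mutation) alongside a single gradient-based DDPG learner. Every rollout,
whether a population member's fitness evaluation or the DDPG actor's noisy
exploration episode, feeds one shared replay buffer. The DDPG agent trains on
that buffer each generation, and its actor is periodically copied over the
weakest population member (controlled by synch_period) so gradient
information flows back into the evolutionary search.
"""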

import argparse
import os
import random
import time

import gym
import numpy as np
import torch

from core import ddpg
from core import mod_neuro_evo as utils_ne
from core import mod_utils as utils
from core import replay_memory

render = False  #Global toggle for rendering the champion's validation episodes

parser = argparse.ArgumentParser()
parser.add_argument('-env', help='Environment Choices: (HalfCheetah-v2) (Ant-v2) (Reacher-v2) (Walker2d-v2) (Swimmer-v2) (Hopper-v2)', required=True)
env_tag = vars(parser.parse_args())['env']


class Parameters:
    def __init__(self):
        #Number of Frames to Run
        if env_tag == 'Hopper-v2': self.num_frames = 4000000
        elif env_tag == 'Ant-v2': self.num_frames = 6000000
        elif env_tag == 'Walker2d-v2': self.num_frames = 8000000
        else: self.num_frames = 2000000

        #USE CUDA
        self.is_cuda = True; self.is_memory_cuda = True

        #Synchronization Period (how often the RL actor is copied into the population)
        if env_tag == 'Hopper-v2' or env_tag == 'Ant-v2': self.synch_period = 1
        else: self.synch_period = 10

        #DDPG params
        self.use_ln = True
        self.gamma = 0.99; self.tau = 0.001
        self.seed = 7
        self.batch_size = 128
        self.buffer_size = 1000000
        self.frac_frames_train = 1.0
        self.use_done_mask = True

        ###### NeuroEvolution Params ########
        #Num of evaluation trials averaged per individual
        if env_tag == 'Hopper-v2' or env_tag == 'Reacher-v2': self.num_evals = 5
        elif env_tag == 'Walker2d-v2': self.num_evals = 3
        else: self.num_evals = 1

        #Elitism Rate
        if env_tag == 'Hopper-v2' or env_tag == 'Ant-v2': self.elite_fraction = 0.3
        elif env_tag == 'Reacher-v2' or env_tag == 'Walker2d-v2': self.elite_fraction = 0.2
        else: self.elite_fraction = 0.1

        self.pop_size = 10
        self.crossover_prob = 0.0
        self.mutation_prob = 0.9

        #State/action dims are filled in from the environment after it is created
        self.state_dim = None; self.action_dim = None

        #Save Results
        self.save_foldername = 'R_ERL/'
        if not os.path.exists(self.save_foldername): os.makedirs(self.save_foldername)
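
#For example, running with -env Hopper-v2 resolves the schedule above to:
#num_frames=4000000, synch_period=1, num_evals=5, elite_fraction=0.3,
#with the shared defaults pop_size=10, crossover_prob=0.0, mutation_prob=0.9.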


class Agent:
    def __init__(self, args, env):
        self.args = args; self.env = env
        self.evolver = utils_ne.SSNE(self.args)

        #Init population
        self.pop = []
        for _ in range(args.pop_size):
            self.pop.append(ddpg.Actor(args))
        #Put population actors in eval mode; they are updated by evolution, not by gradients
        for actor in self.pop: actor.eval()

        #Init RL Agent
        self.rl_agent = ddpg.DDPG(args)
        self.replay_buffer = replay_memory.ReplayMemory(args.buffer_size)
        self.ounoise = ddpg.OUNoise(args.action_dim)

        #Trackers
        self.num_games = 0; self.num_frames = 0; self.gen_frames = None

    def add_experience(self, state, action, next_state, reward, done):
        #Convert scalars to tensors (and move them to GPU if configured) before storage
        reward = utils.to_tensor(np.array([reward])).unsqueeze(0)
        if self.args.is_cuda: reward = reward.cuda()
        if self.args.use_done_mask:
            done = utils.to_tensor(np.array([done]).astype('uint8')).unsqueeze(0)
            if self.args.is_cuda: done = done.cuda()
        action = utils.to_tensor(action)
        if self.args.is_cuda: action = action.cuda()
        self.replay_buffer.push(state, action, next_state, reward, done)

    def evaluate(self, net, is_render, is_action_noise=False, store_transition=True):
        total_reward = 0.0

        state = self.env.reset()
        state = utils.to_tensor(state).unsqueeze(0)
        if self.args.is_cuda: state = state.cuda()
        done = False

        while not done:
            if store_transition: self.num_frames += 1; self.gen_frames += 1
            if render and is_render: self.env.render()
            action = net.forward(state)
            action = action.clamp(-1, 1)  #clamp() is not in-place; rebind the result
            action = utils.to_numpy(action.cpu())
            if is_action_noise: action += self.ounoise.noise()

            next_state, reward, done, info = self.env.step(action.flatten())  #Simulate one step in environment
            next_state = utils.to_tensor(next_state).unsqueeze(0)
            if self.args.is_cuda:
                next_state = next_state.cuda()
            total_reward += reward

            if store_transition: self.add_experience(state, action, next_state, reward, done)
            state = next_state
        if store_transition: self.num_games += 1
        return total_reward
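
    #ddpg.OUNoise (used for exploration above) is defined elsewhere in core/.
    #As a rough, hedged sketch -- not this repo's actual code -- a standard
    #Ornstein-Uhlenbeck process with typical DDPG defaults looks like:
    #
    #    class OUNoise:
    #        def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.2):
    #            self.mu, self.theta, self.sigma = mu, theta, sigma
    #            self.state = np.ones(action_dim) * mu
    #        def noise(self):
    #            #Mean-reverting drift toward mu plus Gaussian diffusion
    #            dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
    #            self.state += dx
    #            return self.state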

    def rl_to_evo(self, rl_net, evo_net):
        #Hard-copy the RL actor's weights into an evolutionary population member
        for target_param, param in zip(evo_net.parameters(), rl_net.parameters()):
            target_param.data.copy_(param.data)

    def train(self):
        self.gen_frames = 0

        ####################### EVOLUTION #####################
        all_fitness = []
        #Evaluate genomes/individuals (fitness = average return over num_evals rollouts)
        for net in self.pop:
            fitness = 0.0
            for _ in range(self.args.num_evals): fitness += self.evaluate(net, is_render=False, is_action_noise=False)
            all_fitness.append(fitness / self.args.num_evals)

        best_train_fitness = max(all_fitness)
        worst_index = all_fitness.index(min(all_fitness))

        #Validation test for the champion (no exploration noise, transitions not stored)
        champ_index = all_fitness.index(max(all_fitness))
        test_score = 0.0
        for _ in range(5): test_score += self.evaluate(self.pop[champ_index], is_render=True, is_action_noise=False, store_transition=False) / 5.0

        #NeuroEvolution's probabilistic selection and recombination step
        elite_index = self.evolver.epoch(self.pop, all_fitness)

        ####################### DDPG #########################
        #DDPG Experience Collection: one noisy rollout of the RL actor
        self.evaluate(self.rl_agent.actor, is_render=False, is_action_noise=True)

        #DDPG learning step: one gradient update per frame collected this generation
        if len(self.replay_buffer) > self.args.batch_size * 5:
            for _ in range(int(self.gen_frames * self.args.frac_frames_train)):
                transitions = self.replay_buffer.sample(self.args.batch_size)
                batch = replay_memory.Transition(*zip(*transitions))
                self.rl_agent.update_parameters(batch)

        #Synch RL Agent to NE: overwrite the weakest individual with the RL actor
        if self.num_games % self.args.synch_period == 0:
            self.rl_to_evo(self.rl_agent.actor, self.pop[worst_index])
            self.evolver.rl_policy = worst_index
            print('Synch from RL --> Nevo')

        return best_train_fitness, test_score, elite_index


if __name__ == "__main__":
    parameters = Parameters()  #Create the Parameters class

    #Initiate trackers for score vs. games, frames, and wall-clock time
    tracker = utils.Tracker(parameters, ['erl'], '_score.csv')
    frame_tracker = utils.Tracker(parameters, ['frame_erl'], '_score.csv')
    time_tracker = utils.Tracker(parameters, ['time_erl'], '_score.csv')

    #Create Env
    env = utils.NormalizedActions(gym.make(env_tag))
    parameters.action_dim = env.action_space.shape[0]
    parameters.state_dim = env.observation_space.shape[0]

    #Seed
    env.seed(parameters.seed)
    torch.manual_seed(parameters.seed); np.random.seed(parameters.seed); random.seed(parameters.seed)

    #Create Agent
    agent = Agent(parameters, env)
    print('Running', env_tag, ' State_dim:', parameters.state_dim, ' Action_dim:', parameters.action_dim)

    next_save = 100; time_start = time.time()
    while agent.num_frames <= parameters.num_frames:
        best_train_fitness, erl_score, elite_index = agent.train()
        print('#Games:', agent.num_games, '#Frames:', agent.num_frames,
              ' Epoch_Max:', '%.2f' % best_train_fitness if best_train_fitness is not None else None,
              ' Test_Score:', '%.2f' % erl_score if erl_score is not None else None,
              ' Avg:', '%.2f' % tracker.all_tracker[0][1], 'ENV ' + env_tag)
        print('RL Selection Rate: Elite/Selected/Discarded', '%.2f' % (agent.evolver.selection_stats['elite'] / agent.evolver.selection_stats['total']),
              '%.2f' % (agent.evolver.selection_stats['selected'] / agent.evolver.selection_stats['total']),
              '%.2f' % (agent.evolver.selection_stats['discarded'] / agent.evolver.selection_stats['total']))
        print()

        tracker.update([erl_score], agent.num_games)
        frame_tracker.update([erl_score], agent.num_frames)
        time_tracker.update([erl_score], time.time() - time_start)

        #Save Policy: checkpoint the current elite every 100 games
        if agent.num_games > next_save:
            next_save += 100
            if elite_index is not None: torch.save(agent.pop[elite_index].state_dict(), parameters.save_foldername + 'evo_net')
            print("Progress Saved")