model.js
//<![CDATA[
// These assignments intentionally omit `var`: they overwrite globals the game
// already defines, so setting them here reconfigures the simulation itself.
lanesSide = 0;            // lanes visible on each side of the agent's lane
patchesAhead = 1;         // road patches visible ahead of the agent
patchesBehind = 0;        // road patches visible behind the agent
trainIterations = 10000;  // number of training iterations the game will run
var num_inputs = (lanesSide * 2 + 1) * (patchesAhead + patchesBehind);
var num_actions = 5;      // number of driving actions the game accepts
var temporal_window = 3;
// network_size mirrors how deepqlearn assembles its input: the last
// `temporal_window` states and (one-hot) actions, plus the current state.
var network_size = num_inputs * temporal_window + num_actions * temporal_window + num_inputs;
// Network definition: input layer -> one fully connected ReLU layer ->
// regression layer producing one Q-value estimate per action.
var layer_defs = [];
layer_defs.push({
    type: 'input',
    out_sx: 1,
    out_sy: 1,
    out_depth: network_size
});
layer_defs.push({
    type: 'fc',
    num_neurons: 1,
    activation: 'relu'
});
layer_defs.push({
    type: 'regression',
    num_neurons: num_actions
});
// Trainer settings used by the Brain's temporal-difference (Q-learning) updates.
var tdtrainer_options = {
    learning_rate: 0.001,
    momentum: 0.0,
    batch_size: 64,
    l2_decay: 0.01
};
// Q-learning options passed to deepqlearn.Brain.
var opt = {};
opt.temporal_window = temporal_window;
opt.experience_size = 3000;        // size of the experience replay memory
opt.start_learn_threshold = 500;   // experiences to collect before learning starts
opt.gamma = 0.7;                   // discount factor for future rewards
opt.learning_steps_total = 10000;  // steps over which epsilon is annealed down
opt.learning_steps_burnin = 1000;  // initial steps of fully random actions
opt.epsilon_min = 0.0;             // exploration rate after annealing
opt.epsilon_test_time = 0.0;       // exploration rate when learning is disabled
opt.layer_defs = layer_defs;
opt.tdtrainer_options = tdtrainer_options;
// `brain` and `learn` are also left global on purpose: the game calls learn()
// each tick with the current state and the reward earned by the previous action.
brain = new deepqlearn.Brain(num_inputs, num_actions, opt);

learn = function (state, lastReward) {
    brain.backward(lastReward);        // learn from the reward of the last action
    var action = brain.forward(state); // choose the next action (epsilon-greedy)
    draw_net();                        // refresh the page's network visualization
    draw_stats();                      // refresh the page's training statistics
    return action;
};
//]]>
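
For context, here is a minimal sketch of how the game-facing `learn` callback gets exercised. It is an illustration under stated assumptions, not part of the model: `fakeState`, the random rewards, and the loop length are placeholders standing in for the observations and rewards the game actually supplies, and it presumes the page has already loaded convnetjs/deepqlearn and defined `draw_net` and `draw_stats`.

// Illustrative only: drive learn() with placeholder observations and rewards.
function fakeState() {
    var s = [];
    for (var i = 0; i < num_inputs; i++) {
        s.push(Math.random()); // placeholder for one observed road patch value
    }
    return s;
}

var reward = 0;
for (var step = 0; step < 10; step++) {
    var action = learn(fakeState(), reward); // returns an index in 0..num_actions-1
    reward = Math.random();                  // placeholder reward for that action
}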