# environment.py
import logging
from typing import Any, Dict, Literal, Optional, Tuple
import cv2
import gym.logger
import gym_super_mario_bros
import numpy as np
import numpy.typing as npt
from dataclasses import dataclass
from gym.core import Env, ObservationWrapper, Wrapper
from gym.spaces import Box, Discrete, MultiDiscrete
from gym.vector import AsyncVectorEnv
from gym.wrappers.frame_stack import FrameStack
from gym.wrappers.gray_scale_observation import GrayScaleObservation
from gym.wrappers.record_episode_statistics import RecordEpisodeStatistics
from gym_super_mario_bros.actions import COMPLEX_MOVEMENT, SIMPLE_MOVEMENT
from nes_py.wrappers import JoypadSpace
from config.environment import EnvironmentConfig
gym.logger.set_level(logging.ERROR)
BASE_WIDTH = 256
BASE_HEIGHT = 240
StepType = Tuple[np.ndarray, float, bool, bool, Dict[str, Any]]
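# StepType mirrors the gym>=0.26 step() contract: (observation, reward, terminated, truncated, info).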
class BaseEnvironment(JoypadSpace):
"""
    This class represents the basic Super Mario Bros environment, initialized from an EnvironmentConfig.
    It inherits from JoypadSpace, which translates our discrete action indices into the button-press
    combinations that the NES emulator understands.
"""
def __init__(
self,
config: EnvironmentConfig,
level: str,
        render_mode: Optional[Literal["human", "rgb_array"]] = None,
) -> None:
"""
Constructor for the BaseEnvironment.
:param config: Configuration for the environment
        :param level: The specific level of the game, e.g. "1-1"
        :param render_mode: Optional render mode, passed through to the underlying gym environment
"""
name = config.env_name
if level:
name = name.replace("Bros-v", f"Bros-{level}-v")
super().__init__(
gym_super_mario_bros.make( # type: ignore
name,
apply_api_compatibility=True,
render_mode=render_mode,
),
COMPLEX_MOVEMENT if config.complex_movement else SIMPLE_MOVEMENT,
)
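# Example: with config.env_name == "SuperMarioBros-v0" and level == "1-1", the id
# passed to gym_super_mario_bros.make becomes "SuperMarioBros-1-1-v0".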
class ClipWrapper(ObservationWrapper):
"""
    This class represents a wrapper for the game environment that clips the observations
    by a configured number of pixels on each side.
"""
def __init__(self, config: EnvironmentConfig, env: Env, **_) -> None:
"""
Constructor for the ClipWrapper.
:param config: Environment configuration
:param env: The environment to be wrapped
        :param _: Additional keyword arguments (ignored)
"""
super().__init__(env)
self.clip_top = config.clip_top
self.clip_bot = config.clip_bot
self.clip_left = config.clip_left
self.clip_right = config.clip_right
self.new_width = BASE_WIDTH - self.clip_left - self.clip_right
self.new_height = BASE_HEIGHT - self.clip_top - self.clip_bot
obs_shape = self.observation_space.shape
if obs_shape is None:
obs_shape = (self.new_height, self.new_width)
else:
obs_shape = (self.new_height, self.new_width, *obs_shape[2:])
self.observation_space = Box(
low=0,
high=255,
shape=obs_shape,
dtype=np.uint8,
)
def observation(self, observation: npt.NDArray) -> npt.NDArray:
"""
Clip the observation based on the pre-configured settings.
:param observation: The original observation
:return: The clipped observation
"""
return observation[
self.clip_top : -self.clip_bot if self.clip_bot else None,
self.clip_left : -self.clip_right if self.clip_right else None,
...,
]
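# Example (illustrative values): clip_top=40 with the other clips at 0 turns a
# 240x256 frame into a 200x256 frame, e.g. to remove the HUD strip at the top.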
class SubsampleWrapper(ObservationWrapper):
"""
This class represents a wrapper for the game environment that subsamples the observations from the game.
"""
def __init__(self, config: EnvironmentConfig, env: Env, **_) -> None:
"""
Constructor for the SubsampleWrapper.
:param config: Environment configuration
:param env: The environment to be wrapped
        :param _: Additional keyword arguments (ignored)
"""
super().__init__(env)
factor = config.subsampling_factor
obs_shape = self.observation_space.shape
if obs_shape is None:
obs_shape = (int(BASE_HEIGHT / factor), int(BASE_WIDTH / factor))
else:
h, w = obs_shape[:2]
obs_shape = (int(h / factor), int(w / factor), *obs_shape[2:])
self.height, self.width = obs_shape[:2]
assert self.height > 0 and self.width > 0, "The sub-sampled observation must have positive dimensions."
self.observation_space = Box(
low=0,
high=255,
shape=obs_shape,
dtype=np.uint8,
)
def observation(self, observation: npt.NDArray) -> npt.NDArray:
"""
Subsample the observation based on the pre-configured settings.
:param observation: The original observation
:return: The sub-sampled observation
"""
        result = cv2.resize(observation, (self.width, self.height))
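        # cv2.resize drops a trailing singleton channel axis, so restore it for grayscale frames.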
if len(result.shape) == 2:
result = np.expand_dims(result, -1)
return result
class ActionRepeatWrapper(Wrapper):
"""
This class represents a wrapper for the game environment that repeats the same action for multiple steps.
"""
def __init__(self, config: EnvironmentConfig, env: Env, **_) -> None:
"""
Constructor for the ActionRepeatWrapper.
        :param config: Environment configuration, providing num_repeat_frames
        :param env: The environment to be wrapped
        :param _: Additional keyword arguments (ignored)
"""
super().__init__(env)
self.num_repeat_frames = config.num_repeat_frames
def step(self, action: int) -> StepType:
"""
Perform the action and repeat it for a predefined number of frames.
If the episode terminates or is truncated before all frames are executed, the process is stopped early.
:param action: The action to be performed
:return: The state, total reward, termination and truncation flags, and metrics
"""
state, reward, terminated, truncated, metrics = super().step(action)
total_reward = reward
for _ in range(self.num_repeat_frames - 1):
if terminated or truncated:
break
state, reward, terminated, truncated, metrics = super().step(action)
total_reward += reward
return state, total_reward, terminated, truncated, metrics
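# Example: with num_repeat_frames == 4, each agent decision is held for four emulator
# steps and the four per-step rewards are summed into a single transition.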
class CustomRewardWrapper(Wrapper):
"""
    This class represents a wrapper for the game environment that replaces the base game
    reward with a shaped custom reward.
"""
def __init__(self, *args, **kwargs) -> None:
"""
Constructor for the CustomRewardWrapper.
:param args: Positional arguments
:param kwargs: Additional arguments
"""
super().__init__(*args, **kwargs)
self.prev_y_pos = float("inf")
self.prev_action = -1
self.fell_off = False
    def reset(self, **kwargs) -> Tuple[npt.NDArray, dict]:
"""
        Reset the environment and the internal reward-shaping state.
:return: The initial state and information
"""
self.prev_y_pos = float("inf")
self.prev_action = -1
self.fell_off = False
        return super().reset(**kwargs)
def step(self, action: int) -> StepType:
"""
Perform the action and compute the custom reward.
:param action: The action to be performed
:return: The state, custom reward, termination and truncation flags, and info
"""
state, reward, terminated, truncated, info = super().step(action)
reward /= 10
reward += max(0, info["y_pos"] - self.prev_y_pos) / 20
reward += (self.prev_action == action) / 20
self.prev_y_pos = info["y_pos"]
self.prev_action = action
if info["y_pos"] < 79 and not self.fell_off:
reward -= 2
self.fell_off = True
if terminated or truncated:
if info["flag_get"]:
reward += 5
elif not self.fell_off:
reward -= 5
return state, reward, terminated, truncated, info
def get_environment(
config: EnvironmentConfig,
**kwargs,
) -> Env:
"""
Set up the game environment with the given configuration.
This includes several wrappers for preprocessing observations and actions.
:param config: Configuration for the environment
:param kwargs: Additional arguments
:return: The wrapped environment
"""
env = BaseEnvironment(config, **kwargs)
if config.grayscale:
env = GrayScaleObservation(env, keep_dim=True)
env = ClipWrapper(config, env)
env = SubsampleWrapper(config, env)
env = CustomRewardWrapper(env)
env = ActionRepeatWrapper(config, env)
env = FrameStack(env, config.num_stack_frames)
env = RecordEpisodeStatistics(env)
return env
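# After all wrappers, observations are frame stacks of shape
# (num_stack_frames, height, width, channels), which is the layout get_env_info expects.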
def get_multiprocess_environment(num_environments: int, *args, **kwargs) -> AsyncVectorEnv:
"""
Set up a multiprocess game environment.
:param num_environments: Number of parallel environments
:param args: Positional arguments for get_environment()
:param kwargs: Keyword arguments for get_environment()
:return: The vectorized environment
"""
return AsyncVectorEnv(
[lambda: get_environment(*args, **kwargs) for _ in range(num_environments)],
copy=False,
)
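# Note: every worker is built from identical arguments, so reusing one lambda per entry
# is safe here; copy=False lets AsyncVectorEnv return observations without copying them
# out of its shared-memory buffers.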
@dataclass
class EnvironmentInfo:
width: int
height: int
stack_frames: int
image_channels: int
num_actions: int
num_workers: int
@property
def total_channel_dim(self):
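        """Channel count after flattening the frame stack, e.g. 4 stacked grayscale frames -> 4 channels."""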
return self.stack_frames * self.image_channels
def get_env_info(env: Env) -> EnvironmentInfo:
"""
Get information about the game environment.
:param env: The environment to get information from
:return: An EnvironmentInfo object with details about the environment
"""
obs_space = env.observation_space.shape
assert obs_space is not None, "Observation space is None."
assert len(obs_space) >= 3, "Observation space dimensions are not sufficient."
height, width, channels = obs_space[-3:]
if len(obs_space) >= 4:
stack_frames = obs_space[-4]
else:
stack_frames = 1
act_space = env.action_space
if isinstance(act_space, Discrete):
num_actions = act_space.n
else:
assert isinstance(act_space, MultiDiscrete), "Action space type not supported."
num_actions = act_space.nvec[0]
if isinstance(env, AsyncVectorEnv):
num_workers = env.num_envs
else:
num_workers = 1
return EnvironmentInfo(width, height, stack_frames, channels, num_actions, num_workers)
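if __name__ == "__main__":
    # Minimal smoke test (a sketch): the EnvironmentConfig field values below are
    # illustrative assumptions, not defaults taken from config/environment.py.
    config = EnvironmentConfig(
        env_name="SuperMarioBros-v0",
        complex_movement=False,
        grayscale=True,
        clip_top=40,  # assumed value: crop the HUD strip at the top
        clip_bot=0,
        clip_left=0,
        clip_right=0,
        subsampling_factor=2,
        num_repeat_frames=4,
        num_stack_frames=4,
    )
    env = get_environment(config, level="1-1")
    print(get_env_info(env))
    state, info = env.reset()
    state, reward, terminated, truncated, info = env.step(env.action_space.sample())
    env.close()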