natureqn_atari.py
"""
Use a deep Q-network for the Atari game. Please report the final result.
Feel free to change the configurations (in the configs/ folder).
If you do, please report your hyperparameters.

You'll find the results, logs, and video recordings of your agent every 250k
steps under the corresponding folder in results/. A good way to monitor
training progress is TensorBoard; the starter code writes summaries of
several variables.

To launch TensorBoard, open a terminal and run

    tensorboard --logdir=results/

then connect remotely to

    address-ip-of-the-server:6006

(6006 is the default port used by TensorBoard.)
"""
import gym

from utils.preprocess import greyscale
from utils.wrappers import PreproWrapper, MaxAndSkipEnv
from schedule import LinearExploration, LinearSchedule
from natureqn import NatureQN
from config import atariconfig_teacher as config
if __name__ == '__main__':
    # make env
    env = gym.make(config.env_name)
    env = MaxAndSkipEnv(env, skip=config.skip_frame)
    env = PreproWrapper(env, prepro=greyscale, shape=(80, 80, 1),
                        overwrite_render=config.overwrite_render)
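    # MaxAndSkipEnv presumably repeats each action for `skip_frame` frames
    # and max-pools consecutive observations (the standard DQN trick for
    # Atari sprite flicker), while PreproWrapper applies the greyscale
    # preprocessing so the agent sees 80x80x1 frames; see utils/wrappers.py
    # for the actual behavior.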
    # exploration strategy
    exp_schedule = LinearExploration(env, config.eps_begin,
                                     config.eps_end, config.eps_nsteps)

    # learning rate schedule
    lr_schedule = LinearSchedule(config.lr_begin, config.lr_end,
                                 config.lr_nsteps)

    # train model
    model = NatureQN(env, config)
    model.run(exp_schedule, lr_schedule)