1 parent 7afca9a · commit 038f9f1
DQN/agent.py
@@ -21,7 +21,7 @@
 OBSERVE = 100

 # action: 0: left, 1: stay, 2: right
-NUN_ACTION = 3
+NUM_ACTION = 3
 SCREEN_WIDTH = 6
 SCREEN_HEIGHT = 10

@@ -31,7 +31,7 @@ def train():
     sess = tf.Session()

     game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, show_game=False)
-    brain = DQN(sess, SCREEN_WIDTH, SCREEN_HEIGHT, NUN_ACTION)
+    brain = DQN(sess, SCREEN_WIDTH, SCREEN_HEIGHT, NUM_ACTION)

     rewards = tf.placeholder(tf.float32, [None])
     tf.summary.scalar('avg.reward/ep.', tf.reduce_mean(rewards))
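The `rewards` placeholder in this hunk feeds a TensorBoard scalar tracking average reward per episode. A minimal sketch of how that summary would typically be evaluated and written each episode, using the same TF 1.x API the diff relies on (the log directory, episode data, and `FileWriter` usage are assumptions, not taken from the repository):

import tensorflow as tf

# Same TF 1.x graph pieces as in the diff above.
rewards = tf.placeholder(tf.float32, [None])
tf.summary.scalar('avg.reward/ep.', tf.reduce_mean(rewards))
merged = tf.summary.merge_all()

sess = tf.Session()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('logs', sess.graph)  # log dir is an assumption

# After an episode, feed the rewards collected during that episode.
episode_rewards = [1.0, 0.0, 1.0]  # illustrative data
summary = sess.run(merged, feed_dict={rewards: episode_rewards})
writer.add_summary(summary, global_step=1)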
@@ -68,7 +68,7 @@ def train():
         # Mostly random actions are taken early on; that probability
        # decays over time until random actions are rarely used.
         if np.random.rand() < epsilon:
-            action = random.randrange(NUN_ACTION)
+            action = random.randrange(NUM_ACTION)
         else:
             action = brain.get_action()

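This last hunk is epsilon-greedy exploration: early in training the agent picks mostly random actions, and as epsilon decays it relies on the network's choice instead. A minimal standalone sketch of that idea, where the decay schedule, constants, and the `q_values` input are illustrative assumptions and `np.argmax(q_values)` stands in for `brain.get_action()` from the diff:

import random

import numpy as np

NUM_ACTION = 3   # 0: left, 1: stay, 2: right
OBSERVE = 100    # pure-observation steps before epsilon starts to decay

def choose_action(q_values, step, eps_start=1.0, eps_min=0.05, decay=0.999):
    # Decay epsilon only after the initial observation phase.
    steps_past_observe = max(0, step - OBSERVE)
    epsilon = max(eps_min, eps_start * decay ** steps_past_observe)

    if np.random.rand() < epsilon:
        return random.randrange(NUM_ACTION)  # explore: random action
    return int(np.argmax(q_values))          # exploit: greedy action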