This is a solution to the Copy-v0 problem from OpenAI Gym [^1].
https://gym.openai.com/envs/Copy-v0
As a follow-up to the article below, I solved it with Q-learning.
http://qiita.com/namakemono/items/16f31c207a4f19c5a4df
Q(s,a) \leftarrow Q(s,a) + \alpha \left\{ r(s,a,s') + \gamma \max_{a'} Q(s',a') - Q(s,a) \right\} \\
r(s,a,s') = \mathbb{E}[R_{t+1} | S_t=s, A_t=a, S_{t+1}=s']
\begin{align}
Q(s,a) &= r(s,a)+\gamma \sum_{s' \in S} p(s'|s,a) \max_{a' \in A(s')} Q(s',a') & \\
&\simeq r(s,a)+\gamma \max_{a' \in A(s')} Q(s',a') & (\because s' \sim p(s'|s,a) \text{; transitions to states other than the sampled } s' \text{ are assumed unlikely}) \\
&\simeq (1-\alpha)Q(s,a) + \alpha \left\{ r(s,a)+\gamma \max_{a' \in A(s')} Q(s',a') \right\} & (\because \text{exponential smoothing with step size } \alpha) \\
&= Q(s,a) + \alpha \left\{ r(s,a)+\gamma \max_{a' \in A(s')} Q(s',a') - Q(s,a) \right\} &
\end{align}
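As a compact sketch of what this update looks like in code: the function name `q_update` and the nested-dict layout `Q[state][action]` are my own choices here, matching the full script below.

```python
def q_update(Q, s, a, r, s_next, alpha=0.3, gamma=0.9):
    # best known value in the successor state; 0 if we have not visited it yet
    v_next = max(Q[s_next].values()) if s_next in Q and Q[s_next] else 0.0
    q = Q.setdefault(s, {}).setdefault(a, 0.0)
    # Q(s,a) <- Q(s,a) + alpha * { r + gamma * max_a' Q(s',a') - Q(s,a) }
    Q[s][a] = q + alpha * (r + gamma * v_next - q)
    return Q[s][a]
```

Applied after every transition this is ordinary online Q-learning; the script below instead sweeps backwards over each finished episode, so the reward collected at the end propagates along the visited states more quickly.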
import numpy as np
import gym
from gym import wrappers


def run(alpha=0.3, gamma=0.9):
    Q = {}  # tabular action values: Q[state][action]
    env = gym.make("Copy-v0")
    env = wrappers.Monitor(env, '/tmp/copy-v0-q-learning', force=True)
    Gs = []
    for episode in range(10**6):
        x = env.reset()
        X, A, R = [], [], []  # states, actions, rewards of this episode
        done = False
        while not done:
            # epsilon-greedy action selection (epsilon = 0.01)
            if (np.random.random() < 0.01) or (x not in Q):
                a = env.action_space.sample()
            else:
                a = max(Q[x].items(), key=lambda kv: kv[1])[0]
            X.append(x)
            A.append(a)
            if x not in Q:
                Q[x] = {}
            if a not in Q[x]:
                Q[x][a] = 0.0
            x, r, done, _ = env.step(a)
            R.append(r)
        # update Q backwards through the finished episode
        T = len(X)
        x, a, r = X[-1], A[-1], R[-1]
        Q[x][a] += alpha * (r - Q[x][a])  # terminal transition has no successor
        for t in range(T - 2, -1, -1):
            x, nx, a, r = X[t], X[t + 1], A[t], R[t]
            Q[x][a] += alpha * (r + gamma * max(Q[nx].values()) - Q[x][a])
        G = sum(R)  # return (total reward) of the episode
        print("Episode: %d, Return: %d" % (episode, G))
        Gs.append(G)
        # Copy-v0 counts as solved when the average return over 100 episodes exceeds 25
        if np.mean(Gs[-100:]) > 25.0:
            break


if __name__ == "__main__":
    run()
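The dictionary-based table works here because Copy-v0 hands us small discrete values: the observation is the character under the read head, and the action is a small tuple (move direction, write flag, character to write), both hashable and therefore usable directly as dict keys. A quick inspection sketch, assuming the same older Gym API as the script above:

```python
import gym

env = gym.make("Copy-v0")
print(env.observation_space)  # the character under the read head (a small discrete value)
print(env.action_space)       # a tuple action: (move direction, write flag, character to write)
x = env.reset()
a = env.action_space.sample()
print(x, a)                   # both hashable, so they can serve as keys of Q
```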
Running this, the target (average return above 25 over 100 episodes) is reached after roughly 30,000 episodes:
Episode: 30229, Return: 29