{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "import swingUp\n",
    "import numpy as np\n",
    "import numpy.random as rnd\n",
    "import torch as pt\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# The Swing-Up Problem\n",
    "\n",
    "In this homework we will study a classic problem of swinging up a pendulum and keeping it balanced. In this problem, forces in the range $[-1,1]$ are applied to a cart with a pendulum on it. The episode ends if one of the boundaries is reached or the pendulum swings too fast. Otherwise, it just keeps going. You recieve a reward of 1 any time the pendulum is above horizontal. You recieve a negative cost if the episode ends becauseof hitting the boundary or swinging too fast.\n",
    "\n",
    "The code below demonstrates a basic SARSA method with a neural network for approximating the $Q$-function. This system has continuous observation and action spaces. However, for simplicity and applicability of the basic SARSA method, we are only using two actions."
   ]
  },
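  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For reference, the agent below selects actions $\\epsilon$-greedily with respect to its current $Q$ estimate and applies the one-step semi-gradient SARSA update\n",
    "\n",
    "$$\\delta_t = r_t + \\gamma\\, Q_w(s_{t+1},a_{t+1}) - Q_w(s_t,a_t), \\qquad w \\leftarrow w + \\alpha\\, \\delta_t\\, \\nabla_w Q_w(s_t,a_t),$$\n",
    "\n",
    "where $Q_w$ is the neural network defined in the next cell, $\\gamma$ is the discount factor, and $\\alpha$ is the step size."
   ]
  },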
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class nnQ(pt.nn.Module):\n",
    "    \"\"\"\n",
    "    Here is a basic neural network with for representing a policy \n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self,stateDim,numActions,numHiddenUnits,numLayers):\n",
    "        super().__init__()\n",
    "        \n",
    "        InputLayer = [pt.nn.Linear(stateDim+numActions,numHiddenUnits),\n",
    "                      pt.nn.ReLU()]\n",
    "        \n",
    "        HiddenLayers = []\n",
    "        for _ in range(numLayers-1):\n",
    "            HiddenLayers.append(pt.nn.Linear(numHiddenUnits,numHiddenUnits))\n",
    "            HiddenLayers.append(pt.nn.ReLU())\n",
    "            \n",
    "        \n",
    "        OutputLayer = [pt.nn.Linear(numHiddenUnits,1)]\n",
    "        \n",
    "        AllLayers = InputLayer + HiddenLayers + OutputLayer\n",
    "        self.net = pt.nn.Sequential(*AllLayers)\n",
    "        \n",
    "        self.numActions = numActions\n",
    "        \n",
    "    def forward(self,x,a):\n",
    "        x = pt.tensor(x,dtype=pt.float32)\n",
    "\n",
    "        b = pt.nn.functional.one_hot(pt.tensor(a),self.numActions)\n",
    "        \n",
    "        c = b.float().detach()\n",
    "        y = pt.cat([x,c])\n",
    "        \n",
    "        return self.net(y)\n",
    "        \n",
    "    \n",
    "class sarsaAgent:\n",
    "    def __init__(self,stateDim,numActions,numHiddenUnits,numLayers,\n",
    "                epsilon=.1,gamma=.9,alpha=.1):\n",
    "        self.Q = nnQ(stateDim,numActions,numHiddenUnits,numLayers)\n",
    "        self.gamma = gamma\n",
    "        self.epsilon = epsilon\n",
    "        self.alpha = alpha\n",
    "        self.numActions = numActions\n",
    "        self.s_last = None\n",
    "    def action(self,x):\n",
    "        # This is an epsilon greedy selection\n",
    "        if rnd.rand() < self.epsilon:\n",
    "            a = rnd.randint(numActions)\n",
    "        else:\n",
    "            qBest = -np.inf\n",
    "            for aTest in range(self.numActions):\n",
    "                qTest = self.Q(x,aTest).detach().numpy()[0]\n",
    "                if qTest > qBest:\n",
    "                    qBest = qTest\n",
    "                    a = aTest\n",
    "        return a\n",
    "    \n",
    "    def update(self,s,a,r,s_next,done):\n",
    "        \n",
    "        # Compute the TD error, if there is enough data\n",
    "        update = True\n",
    "        if done:\n",
    "            Q_cur = self.Q(s,a).detach().numpy()[0]\n",
    "            delta = r - Q_cur\n",
    "            self.s_last = None\n",
    "            Q_diff = self.Q(s,a)\n",
    "        elif self.s_last is not None:\n",
    "            Q_next = self.Q(s,a).detach().numpy()[0]\n",
    "            Q_cur = self.Q(self.s_last,self.a_last).detach().numpy()[0]\n",
    "            delta = self.r_last + self.gamma * Q_next - Q_cur\n",
    "            Q_diff = self.Q(self.s_last,self.a_last)\n",
    "        else:\n",
    "            update = False\n",
    "            \n",
    "        # Update the parameter via the semi-gradient method\n",
    "        if update:\n",
    "            self.Q.zero_grad()\n",
    "            Q_diff.backward()\n",
    "            for p in self.Q.parameters():\n",
    "                p.data.add_(self.alpha*delta,p.grad.data)\n",
    "            \n",
    "            \n",
    "        \n",
    "        if not done:\n",
    "            self.s_last = np.copy(s)\n",
    "            self.a_last = np.copy(a)\n",
    "            self.r_last = np.copy(r)\n"
   ]
  },
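  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before launching the long simulation, a minimal sanity check like the one below (using a dummy 5-dimensional state, the same dimension as the observation used later) can confirm that the classes above run end to end."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check on a dummy state (illustrative only).\n",
    "testAgent = sarsaAgent(5,2,20,1)\n",
    "s0 = np.zeros(5)\n",
    "a0 = testAgent.action(s0)                 # epsilon-greedy action\n",
    "testAgent.update(s0,a0,0.,s0,False)       # first call just caches (s,a,r)\n",
    "print('action:',a0,', Q(s0,a0):',testAgent.Q(s0,a0).detach().numpy()[0])"
   ]
  },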
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# The Simulation\n",
    "\n",
    "Here is the simulation. It takes about an hour to run this (at least on my computer). I suggest you make yourself a very complex, luxurious sandwich while you wait. Alternatively, just decrease `maxSteps` to a more civilised value.\n",
    "\n",
    "It doesn't learn the task perfectly, but it will do pretty well for stretches. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Episode: 1 Total Steps: 705 , Ave. Reward: -0.7323210705117651 , Episode Length: 705 Max Up-Time: 54\n",
      "Episode: 2 Total Steps: 1087 , Ave. Reward: -1.386005534482438 , Episode Length: 382 Max Up-Time: 38\n",
      "Episode: 3 Total Steps: 1563 , Ave. Reward: -0.757185810040358 , Episode Length: 476 Max Up-Time: 52\n",
      "Episode: 4 Total Steps: 1871 , Ave. Reward: -2.3208087438993275 , Episode Length: 308 Max Up-Time: 86\n",
      "Episode: 5 Total Steps: 2193 , Ave. Reward: -2.1598192165326444 , Episode Length: 322 Max Up-Time: 33\n",
      "Episode: 6 Total Steps: 2855 , Ave. Reward: -0.7392412280730795 , Episode Length: 662 Max Up-Time: 45\n",
      "Episode: 7 Total Steps: 3307 , Ave. Reward: -1.3685011673110579 , Episode Length: 452 Max Up-Time: 60\n",
      "Episode: 8 Total Steps: 3893 , Ave. Reward: -0.745503768027489 , Episode Length: 586 Max Up-Time: 27\n",
      "Episode: 9 Total Steps: 4243 , Ave. Reward: -1.8577023916291222 , Episode Length: 350 Max Up-Time: 44\n",
      "Episode: 10 Total Steps: 4510 , Ave. Reward: -0.7272320071143903 , Episode Length: 267 Max Up-Time: 29\n",
      "Episode: 11 Total Steps: 5193 , Ave. Reward: -0.36655536116630455 , Episode Length: 683 Max Up-Time: 39\n",
      "Episode: 12 Total Steps: 6005 , Ave. Reward: -0.2880664852587831 , Episode Length: 812 Max Up-Time: 57\n",
      "Episode: 13 Total Steps: 6476 , Ave. Reward: -1.9700554803843828 , Episode Length: 471 Max Up-Time: 40\n",
      "Episode: 14 Total Steps: 6654 , Ave. Reward: -3.5124131530851304 , Episode Length: 178 Max Up-Time: 33\n",
      "Episode: 15 Total Steps: 7120 , Ave. Reward: -1.0719102072420077 , Episode Length: 466 Max Up-Time: 30\n",
      "Episode: 16 Total Steps: 7385 , Ave. Reward: -1.2138522089615815 , Episode Length: 265 Max Up-Time: 30\n",
      "Episode: 17 Total Steps: 7890 , Ave. Reward: -0.9911817636578898 , Episode Length: 505 Max Up-Time: 34\n",
      "Episode: 18 Total Steps: 8348 , Ave. Reward: -0.24673268477366056 , Episode Length: 458 Max Up-Time: 24\n",
      "Episode: 19 Total Steps: 8885 , Ave. Reward: -0.19901803972570822 , Episode Length: 537 Max Up-Time: 34\n",
      "Episode: 20 Total Steps: 9257 , Ave. Reward: -1.0397485980722023 , Episode Length: 372 Max Up-Time: 47\n",
      "Episode: 21 Total Steps: 9593 , Ave. Reward: -1.9637917520715307 , Episode Length: 336 Max Up-Time: 62\n",
      "Episode: 22 Total Steps: 10146 , Ave. Reward: -1.9249864843211308 , Episode Length: 553 Max Up-Time: 37\n",
      "Episode: 23 Total Steps: 10413 , Ave. Reward: -2.4530561661945107 , Episode Length: 267 Max Up-Time: 45\n",
      "Episode: 24 Total Steps: 11569 , Ave. Reward: -0.6792904495051211 , Episode Length: 1156 Max Up-Time: 78\n",
      "Episode: 25 Total Steps: 11982 , Ave. Reward: -0.8938237978597035 , Episode Length: 413 Max Up-Time: 39\n",
      "Episode: 26 Total Steps: 12243 , Ave. Reward: -1.2015515630784517 , Episode Length: 261 Max Up-Time: 20\n",
      "Episode: 27 Total Steps: 12419 , Ave. Reward: -2.887021412838564 , Episode Length: 176 Max Up-Time: 34\n",
      "Episode: 28 Total Steps: 13159 , Ave. Reward: -0.6110313146967027 , Episode Length: 740 Max Up-Time: 37\n",
      "Episode: 29 Total Steps: 13589 , Ave. Reward: -1.928537880786853 , Episode Length: 430 Max Up-Time: 75\n",
      "Episode: 30 Total Steps: 14149 , Ave. Reward: -2.057777774292444 , Episode Length: 560 Max Up-Time: 33\n",
      "Episode: 31 Total Steps: 14662 , Ave. Reward: -1.1384388629314761 , Episode Length: 513 Max Up-Time: 75\n",
      "Episode: 32 Total Steps: 15058 , Ave. Reward: -0.7835031778339514 , Episode Length: 396 Max Up-Time: 31\n",
      "Episode: 33 Total Steps: 15649 , Ave. Reward: -0.8815494773451172 , Episode Length: 591 Max Up-Time: 57\n",
      "Episode: 34 Total Steps: 16393 , Ave. Reward: -1.2573853074557841 , Episode Length: 744 Max Up-Time: 44\n",
      "Episode: 35 Total Steps: 16920 , Ave. Reward: -1.7520053641436282 , Episode Length: 527 Max Up-Time: 60\n",
      "Episode: 36 Total Steps: 17557 , Ave. Reward: -1.3575707923649754 , Episode Length: 637 Max Up-Time: 29\n",
      "Episode: 37 Total Steps: 18229 , Ave. Reward: -1.1195498770809882 , Episode Length: 672 Max Up-Time: 53\n",
      "Episode: 38 Total Steps: 18618 , Ave. Reward: -3.222163638956208 , Episode Length: 389 Max Up-Time: 35\n",
      "Episode: 39 Total Steps: 19276 , Ave. Reward: -0.5477558848181898 , Episode Length: 658 Max Up-Time: 65\n",
      "Episode: 40 Total Steps: 19804 , Ave. Reward: -0.6584685777265661 , Episode Length: 528 Max Up-Time: 59\n",
      "Episode: 41 Total Steps: 20571 , Ave. Reward: -0.4637257282420204 , Episode Length: 767 Max Up-Time: 66\n",
      "Episode: 42 Total Steps: 21002 , Ave. Reward: -1.0716300038503115 , Episode Length: 431 Max Up-Time: 34\n",
      "Episode: 43 Total Steps: 21892 , Ave. Reward: -0.5765091241222032 , Episode Length: 890 Max Up-Time: 43\n",
      "Episode: 44 Total Steps: 22898 , Ave. Reward: -0.014668360500057598 , Episode Length: 1006 Max Up-Time: 82\n",
      "Episode: 45 Total Steps: 23543 , Ave. Reward: -0.9241536579523029 , Episode Length: 645 Max Up-Time: 83\n",
      "Episode: 46 Total Steps: 23871 , Ave. Reward: -1.0800045254850208 , Episode Length: 328 Max Up-Time: 54\n",
      "Episode: 47 Total Steps: 24746 , Ave. Reward: -0.4107819625154534 , Episode Length: 875 Max Up-Time: 55\n",
      "Episode: 48 Total Steps: 25088 , Ave. Reward: -1.2879242748541877 , Episode Length: 342 Max Up-Time: 33\n",
      "Episode: 49 Total Steps: 26200 , Ave. Reward: -0.26045605232327973 , Episode Length: 1112 Max Up-Time: 41\n",
      "Episode: 50 Total Steps: 26444 , Ave. Reward: -1.919118523045648 , Episode Length: 244 Max Up-Time: 39\n",
      "Episode: 51 Total Steps: 27253 , Ave. Reward: -1.4967671580395374 , Episode Length: 809 Max Up-Time: 46\n",
      "Episode: 52 Total Steps: 27729 , Ave. Reward: -2.8176206551741148 , Episode Length: 476 Max Up-Time: 79\n",
      "Episode: 53 Total Steps: 28229 , Ave. Reward: -0.6498666207781548 , Episode Length: 500 Max Up-Time: 41\n",
      "Episode: 54 Total Steps: 28535 , Ave. Reward: -1.442685133245505 , Episode Length: 306 Max Up-Time: 50\n",
      "Episode: 55 Total Steps: 29554 , Ave. Reward: -0.7346716942732042 , Episode Length: 1019 Max Up-Time: 91\n",
      "Episode: 56 Total Steps: 30478 , Ave. Reward: -0.2849088403628985 , Episode Length: 924 Max Up-Time: 74\n",
      "Episode: 57 Total Steps: 31018 , Ave. Reward: -0.9564715045670377 , Episode Length: 540 Max Up-Time: 37\n",
      "Episode: 58 Total Steps: 31702 , Ave. Reward: -0.029077670750031454 , Episode Length: 684 Max Up-Time: 107\n",
      "Episode: 59 Total Steps: 32987 , Ave. Reward: -0.7201722007016701 , Episode Length: 1285 Max Up-Time: 61\n",
      "Episode: 60 Total Steps: 33662 , Ave. Reward: -1.447542019951657 , Episode Length: 675 Max Up-Time: 36\n",
      "Episode: 61 Total Steps: 34853 , Ave. Reward: -0.7092105324653137 , Episode Length: 1191 Max Up-Time: 60\n",
      "Episode: 62 Total Steps: 35556 , Ave. Reward: -1.008758527335291 , Episode Length: 703 Max Up-Time: 70\n",
      "Episode: 63 Total Steps: 37253 , Ave. Reward: -0.11298222499952609 , Episode Length: 1697 Max Up-Time: 71\n",
      "Episode: 64 Total Steps: 37607 , Ave. Reward: -2.333472374918777 , Episode Length: 354 Max Up-Time: 42\n",
      "Episode: 65 Total Steps: 38769 , Ave. Reward: -0.5435572183910845 , Episode Length: 1162 Max Up-Time: 86\n",
      "Episode: 66 Total Steps: 40346 , Ave. Reward: -0.3510286890823708 , Episode Length: 1577 Max Up-Time: 35\n",
      "Episode: 67 Total Steps: 41342 , Ave. Reward: -0.43548023889291587 , Episode Length: 996 Max Up-Time: 56\n",
      "Episode: 68 Total Steps: 41732 , Ave. Reward: -0.5497570997221486 , Episode Length: 390 Max Up-Time: 23\n",
      "Episode: 69 Total Steps: 42909 , Ave. Reward: 0.038723967455411054 , Episode Length: 1177 Max Up-Time: 50\n",
      "Episode: 70 Total Steps: 43482 , Ave. Reward: -0.6763002936147164 , Episode Length: 573 Max Up-Time: 44\n",
      "Episode: 71 Total Steps: 44648 , Ave. Reward: -0.9062670195421423 , Episode Length: 1166 Max Up-Time: 38\n",
      "Episode: 72 Total Steps: 45758 , Ave. Reward: -0.30692741058433193 , Episode Length: 1110 Max Up-Time: 65\n",
      "Episode: 73 Total Steps: 45969 , Ave. Reward: -5.942005811194079 , Episode Length: 211 Max Up-Time: 33\n",
      "Episode: 74 Total Steps: 46338 , Ave. Reward: -2.397374412919753 , Episode Length: 369 Max Up-Time: 46\n",
      "Episode: 75 Total Steps: 46748 , Ave. Reward: -0.8976414519854521 , Episode Length: 410 Max Up-Time: 51\n",
      "Episode: 76 Total Steps: 47539 , Ave. Reward: -0.7642109985612977 , Episode Length: 791 Max Up-Time: 36\n",
      "Episode: 77 Total Steps: 48602 , Ave. Reward: -0.28253135177080774 , Episode Length: 1063 Max Up-Time: 69\n",
      "Episode: 78 Total Steps: 48996 , Ave. Reward: -0.9534163332275297 , Episode Length: 394 Max Up-Time: 20\n",
      "Episode: 79 Total Steps: 50055 , Ave. Reward: -0.2423828962026703 , Episode Length: 1059 Max Up-Time: 91\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Episode: 80 Total Steps: 50944 , Ave. Reward: -0.6279331535186513 , Episode Length: 889 Max Up-Time: 52\n",
      "Episode: 81 Total Steps: 52280 , Ave. Reward: -0.20073270114025632 , Episode Length: 1336 Max Up-Time: 83\n",
      "Episode: 82 Total Steps: 53883 , Ave. Reward: -0.08541144990249938 , Episode Length: 1603 Max Up-Time: 122\n",
      "Episode: 83 Total Steps: 54334 , Ave. Reward: -0.34031538317112564 , Episode Length: 451 Max Up-Time: 44\n",
      "Episode: 84 Total Steps: 55892 , Ave. Reward: -0.8371231570383377 , Episode Length: 1558 Max Up-Time: 34\n",
      "Episode: 85 Total Steps: 57479 , Ave. Reward: -0.699271602876404 , Episode Length: 1587 Max Up-Time: 41\n",
      "Episode: 86 Total Steps: 61409 , Ave. Reward: -0.1733200848853596 , Episode Length: 3930 Max Up-Time: 67\n",
      "Episode: 87 Total Steps: 63851 , Ave. Reward: -0.08644986908609424 , Episode Length: 2442 Max Up-Time: 81\n",
      "Episode: 88 Total Steps: 69570 , Ave. Reward: -0.09404408310442923 , Episode Length: 5719 Max Up-Time: 38\n",
      "Episode: 89 Total Steps: 76704 , Ave. Reward: -0.17318490459394822 , Episode Length: 7134 Max Up-Time: 35\n",
      "Episode: 90 Total Steps: 90786 , Ave. Reward: -0.0956459185629743 , Episode Length: 14082 Max Up-Time: 28\n"
     ]
    }
   ],
   "source": [
    "# This is the environment\n",
    "env = swingUp.SwingUpEnv()\n",
    "\n",
    "# For simplicity, we only consider forces of -1 and 1\n",
    "numActions = 2\n",
    "Actions = np.linspace(-1,1,numActions)\n",
    "\n",
    "# This is our learning agent\n",
    "gamma = .95\n",
    "agent = sarsaAgent(5,numActions,20,1,epsilon=5e-2,gamma=gamma,alpha=1e-5)\n",
    "\n",
    "maxSteps = 2e5\n",
    "\n",
    "# This is a helper to deal with the fact that x[2] is actually an angle\n",
    "x_to_y = lambda x : np.array([x[0],x[1],np.cos(x[2]),np.sin(x[2]),x[3]])\n",
    "\n",
    "R = []\n",
    "UpTime = []\n",
    "\n",
    "step = 0\n",
    "ep = 0\n",
    "while step < maxSteps:\n",
    "    ep += 1\n",
    "    x = env.reset()\n",
    "    C = 0.\n",
    "    \n",
    "    done = False\n",
    "    t = 1\n",
    "    while not done:\n",
    "        t += 1\n",
    "        step += 1\n",
    "        y = x_to_y(x)\n",
    "        a = agent.action(y)\n",
    "        u = Actions[a:a+1]\n",
    "        env.render()\n",
    "        x_next,c,done,info = env.step(u)\n",
    "        \n",
    "        max_up_time = info['max_up_time']\n",
    "        y_next = x_to_y(x_next)\n",
    "\n",
    "        C += (1./t)*(c-C)\n",
    "        agent.update(y,a,c,y_next,done)\n",
    "        x = x_next\n",
    "        if done:\n",
    "            break\n",
    "            \n",
    "        if step >= maxSteps:\n",
    "            break\n",
    "            \n",
    "        \n",
    "        R.append(C)\n",
    "    UpTime.append(max_up_time)\n",
    "    #print('t:',ep+1,', R:',C,', L:',t-1,', G:',G,', Q:', Q_est, 'U:', max_up_time)\n",
    "    print('Episode:',ep,'Total Steps:',step,', Ave. Reward:',C,', Episode Length:',t-1, 'Max Up-Time:', max_up_time)\n",
    "env.close()\n",
    "\n",
    "plt.plot(UpTime)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In each episode, the system tracks the \"Maximum Up-Time\", which is the longest stretch of time that the pendulum tip was above the horizontal"
   ]
  },
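  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The per-episode up-times are quite noisy, so a short moving average (a minimal sketch, assuming the `UpTime` list produced by the simulation above) can make the learning trend easier to see."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smooth the per-episode maximum up-times with a simple moving average.\n",
    "window = 10\n",
    "if len(UpTime) >= window:\n",
    "    smoothed = np.convolve(UpTime, np.ones(window)/window, mode='valid')\n",
    "    plt.plot(UpTime, alpha=.4, label='per episode')\n",
    "    plt.plot(np.arange(window-1, len(UpTime)), smoothed, label='%d-episode average' % window)\n",
    "    plt.xlabel('Episode')\n",
    "    plt.ylabel('Max Up-Time')\n",
    "    plt.legend()"
   ]
  },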
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Question\n",
    "\n",
    "Implement your own controller or modify the code above. Try a different function approximation and see if you can get longer up time, learn more quickly, or more consistently. As with the code above, plot the maximum up time for each episode."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Make your code here."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [default]",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}