
Commit e606ce0

Add files via upload
1 parent b87bf34 commit e606ce0

5 files changed: +1774 −0 lines changed

1_LinearRegression_160516.ipynb: +296 lines
@@ -0,0 +1,296 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\\* *[Notice] I wrote this code while following the examples in [Choi's Tensorflow-101 tutorial](https://github.com/sjchoi86/Tensorflow-101). As far as I know, most of Choi's examples originally come from [Aymeric Damien's](https://github.com/aymericdamien/TensorFlow-Examples/) and [Nathan Lintz's](https://github.com/nlintz/TensorFlow-Tutorials) tutorials.*"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Linear Regression"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"#%matplotlib inline "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set initial data\n",
"\n",
"My training data: $y = 0.5x + 0.1 + \\epsilon$, where $\\epsilon \\sim \\mathcal{N}(0,\\,0.1^2)$ (Gaussian noise with mean 0 and standard deviation 0.1)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"W_ref = 0.5\n",
"b_ref = 0.1\n",
"nData = 51\n",
"noise_mu = 0\n",
"noise_std = 0.1"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"X_train = np.linspace(-2,2,nData)\n",
"Y_test = W_ref * X_train + b_ref\n",
"Y_train = Y_test + np.random.normal(noise_mu, noise_std, nData)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Plot the data using *matplotlib*"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"plt.figure(1)\n",
"plt.plot(X_train, Y_test, 'ro', label='True data')\n",
"plt.plot(X_train, Y_train, 'bo', label='Training data')\n",
"plt.axis('equal')\n",
"plt.legend(loc='lower right')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"### Write a TF graph"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"X = tf.placeholder(tf.float32, name=\"input\")\n",
"Y = tf.placeholder(tf.float32, name=\"output\")\n",
"W = tf.Variable(np.random.randn(), name=\"weight\")\n",
"b = tf.Variable(np.random.randn(), name=\"bias\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"Y_pred = tf.add(tf.mul(X, W), b)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We use an L2 (mean squared error) loss function, $loss = \\frac{1}{N}\\sum_{i=1}^{N} (y'_i - y_i)^2$\n",
"\n",
"*reduce_mean(X)* returns the mean of all elements of the tensor *X*"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"loss = tf.reduce_mean(tf.square(Y-Y_pred))"
]
},
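{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a cross-check on this loss, the line minimizing it can also be obtained in closed form with NumPy's `polyfit` (degree 1 returns slope and intercept). The sketch below is independent of the TF graph and only uses the training data; the names `W_ls` and `b_ls` are introduced here just for this comparison."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Closed-form least-squares cross-check (sketch; not used by the TF graph below)\n",
"W_ls, b_ls = np.polyfit(X_train, Y_train, 1)\n",
"print \"[Closed-form: W, b] {:05.4f}, {:05.4f}\".format(W_ls, b_ls)"
]
},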
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"learning_rate = 0.005\n",
"optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n",
"training_epochs = 50 # We will repeat the learning process 50 times\n",
"display_epoch = 5 # We will print the error every 5 epochs"
]
},
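{
"cell_type": "markdown",
"metadata": {},
"source": [
"For each training pair $(x, y)$ fed in below, the optimizer performs (sketching the update for this scalar loss, with prediction $y' = Wx + b$ and learning rate $\\eta = 0.005$):\n",
"\n",
"$W \\leftarrow W - \\eta\\,\\frac{\\partial loss}{\\partial W} = W + 2\\eta\\,(y - y')\\,x, \\qquad b \\leftarrow b - \\eta\\,\\frac{\\partial loss}{\\partial b} = b + 2\\eta\\,(y - y')$"
]
},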
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run the session"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"sess = tf.Session()\n",
"sess.run(tf.initialize_all_variables())"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(epoch 5)\n",
"[W, b / loss] 0.4960, 0.0644 / 0.0142\n",
" \n",
"(epoch 10)\n",
"[W, b / loss] 0.5017, 0.1158 / 0.0113\n",
" \n",
"(epoch 15)\n",
"[W, b / loss] 0.5021, 0.1195 / 0.0113\n",
" \n",
"(epoch 20)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"(epoch 25)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"(epoch 30)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"(epoch 35)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"(epoch 40)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"(epoch 45)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"(epoch 50)\n",
"[W, b / loss] 0.5021, 0.1198 / 0.0113\n",
" \n",
"[Final: W, b] 0.5021, 0.1198\n",
"[True:  W, b] 0.5000, 0.1000\n"
]
}
],
"source": [
"for epoch in range(training_epochs):\n",
"    for (x,y) in zip(X_train, Y_train):\n",
"        sess.run(optimizer, feed_dict={X:x, Y:y})\n",
"    \n",
"    # Print the result\n",
"    if (epoch+1) % display_epoch == 0:\n",
"        W_temp = sess.run(W)\n",
"        b_temp = sess.run(b)\n",
"        loss_temp = sess.run(loss, feed_dict={X: X_train, Y:Y_train}) \n",
"        print \"(epoch {})\".format(epoch+1) \n",
"        print \"[W, b / loss] {:05.4f}, {:05.4f} / {:05.4f}\".format(W_temp, b_temp, loss_temp) \n",
"        print \" \"\n",
"\n",
"# Final results \n",
"W_result = sess.run(W)\n",
"b_result = sess.run(b) \n",
"print \"[Final: W, b] {:05.4f}, {:05.4f}\".format(W_result, b_result)\n",
"print \"[True:  W, b] {:05.4f}, {:05.4f}\".format(W_ref, b_ref)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"plt.figure(2)\n",
"plt.plot(X_train, Y_test, 'ro', label='True data')\n",
"plt.plot(X_train, Y_train, 'bo', label='Training data')\n",
"plt.plot(X_train, W_result*X_train+b_result, 'g-', linewidth=3, label='Regression result')\n",
"plt.axis('equal')\n",
"plt.legend(loc='lower right')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"sess.close()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
