# Linear Regression Part 3

By Mohendra Roy

In [1]:

```
''' Here we are going to implement Linear Regression in TensorFlow (1.x graph-mode API).

The script:
  1. builds a linear model lm = x*w + b with trainable scalars w (slope) and b (bias),
  2. fits it to a tiny 5-point dataset with GradientDescentOptimizer,
  3. prints weight/bias/loss before and after training,
  4. plots the data against the fitted line.
'''
import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TF info/warning log spam
sess = tf.Session()
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import pylab as plb
from IPython.display import Image, display

# Initial variables for weight and bias, both starting at 0.0.
w = tf.Variable(0.0)  # w is the slope of the hypothesis H = aX + b, i.e. w = a
'''
If you go through our earlier classes on linear regression you will find that we used P1 instead of w.
So, don't be confused by the nomenclature.
'''
b = tf.Variable(0.0)  # b is the bias (intercept)
''' In our earlier class, we used P2 instead of b '''

init = tf.global_variables_initializer()  # op that initializes all global variables
sess.run(init)  # run the initializer so w and b hold their starting values
wini = sess.run(w)  # store the initial weight value in wini (for the before/after report)
bini = sess.run(b)  # store the initial bias in bini

# Placeholders for the input data and labels (fed at run time via feed_dict).
x = tf.placeholder(tf.float32)  # to feed X data
y = tf.placeholder(tf.float32)  # to feed Y data (the labels)
InX = [1, 2, 3, 4, 5]
InY = [10, 40, 50, 78, 83]

# The hypothesis / linear model.
lm = x*w + b
''' In our earlier class we used H = P1*X + P2 '''

# Setting the desired goal: sum of squared residuals between model and labels.
loss = tf.reduce_sum(tf.square(lm - y))
lossini = sess.run(loss, {x: InX, y: InY})  # storing the initial loss due to initial weight and bias
''' Our desired goal is to reduce the difference between the training input and training label.
To achieve this, we will use an optimization method: the Gradient Descent Optimizer algorithm.
'''

# Optimization with Gradient Descent Optimizer.
op = tf.train.GradientDescentOptimizer(0.01)  # 0.01 is the learning rate (step size of the descent)
ob = op.minimize(loss)  # the objective (ob) is to minimize the loss
''' Our main objective is to reduce the loss, i.e. the difference between the training input and label.
Now we will train our model in a loop to reach the minimum loss by updating weight and bias.
'''

# Training loop: each run of `ob` performs one gradient-descent update of w and b.
# NOTE(fix): sess.run(ob, ...) must be indented inside the loop — otherwise only a
# single training step would execute after 1000 empty iterations.
for i in range(1000):
    sess.run(ob, {x: InX, y: InY})

wfi = sess.run(w)  # final weight after training
bfi = sess.run(b)  # final bias after training
# Evaluation of the loss after optimizing the parameters, i.e. after optimizing the weight and bias.
lossfi = sess.run(loss, {x: InX, y: InY})  # loss after the training

print("Initial weight: %s, Bias: %s, Loss before optimization: %s " % (wini, bini, lossini))  # before training
print("Final weight: %s, Bias: %s, Loss after optimization: %s " % (wfi, bfi, lossfi))  # after training
''' Up to this point we have just trained our first linear model. However, we want to visualize it too. '''

# Plotting the data and the optimized model.
X = np.array(InX)  # converting the input X into an array
Y = np.array(InY)  # converting the input Y into an array
Hf = wfi*X + bfi  # final hypothesis with optimized weight and bias
line1, = plt.plot(X, Y, "ro", markersize=5, label='Original Data')
line2, = plt.plot(X, Hf, label='Optimised Model')
plt.title('X vs Y')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)})
plt.show()
```