# Use seaborn for pairplot.
pip install -q seaborn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Make NumPy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
First download and import the dataset using pandas:
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']

raw_dataset = pd.read_csv(url, names=column_names,
                          na_values='?', comment='\t',
                          sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
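The usual next steps in this tutorial are to drop the rows with missing values and one-hot encode the categorical Origin column. Here is a minimal sketch; these exact lines are assumptions based on the standard Auto MPG preparation, not part of the excerpt above:

# Drop the handful of rows where Horsepower was '?' (read in as NaN)
dataset = dataset.dropna()

# 'Origin' is categorical, not numeric: map the codes, then one-hot encode
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
dataset = pd.get_dummies(dataset, columns=['Origin'], prefix='', prefix_sep='')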
I'm learning TensorFlow 2.0 and I thought it would be a good idea to implement the most basic simple linear regression in TensorFlow. Unfortunately, I ran into several issues and I was wondering if anyone here might be able to help. In addition to the error itself, I have one extra question: is the general approach reasonable, or am I doing anything odd here? (Ignoring the batch size and the fact that I have no validation data; this is just a toy example.)
Consider the following set up:
import tensorflow as tf  # 2.0.0-alpha0
import numpy as np

x_data = np.random.randn(2000, 1)
w_real = [0.7]  # coefficients
b_real = -0.2   # global bias
noise = np.random.randn(1, 2000) * 0.5  # level of noise
y_data = np.matmul(w_real, x_data.T) + b_real + noise
Now on with the model definition:
# modelling this data with tensorflow (manually!)
class SimpleRegressionNN(tf.keras.Model):

    def __init__(self):
        super(SimpleRegressionNN, self).__init__()
        self.input_layer = tf.keras.layers.Input
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, data_input):
        model = self.input_layer(data_input)
        model = self.output_layer(model)
        # open question: how to account for the intercept/bias term?
        # Ideally, we'd want to generate preds as matmul(X, W) + b
        return model

nn_regressor = SimpleRegressionNN()

reg_loss = tf.keras.losses.MeanSquaredError()
reg_optimiser = tf.keras.optimizers.SGD(0.1)
metric_accuracy = tf.keras.metrics.mean_squared_error

# define forward step
@tf.function
def train_step(x_sample, y_sample):
    with tf.GradientTape() as tape:
        predictions = nn_regressor(x_sample)
        loss = reg_loss(y_sample, predictions)
    gradients = tape.gradient(loss, nn_regressor.trainable_variables)  # had to indent this!
    reg_optimiser.apply_gradients(zip(gradients, nn_regressor.trainable_variables))
    metric_accuracy(y_sample, predictions)

# run the model
for epoch in range(10):
    for x_point, y_point in zip(x_data.T[0], y_data[0]):  # batch of 1
        train_step(x_sample=x_point, y_sample=y_point)

print("MSE: {}".format(metric_accuracy.result()))
Unfortunately, I'm getting the following error:
TypeError: You are attempting to use Python control flow in a layer that was not declared to be dynamic. Pass `dynamic=True` to the class constructor.
Encountered error: """Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn."""
This is a corrected version of your code. The key change is removing the self.input_layer = tf.keras.layers.Input attribute: Input belongs to the functional API and is not a callable layer inside a subclassed model's call(), which is what triggered the control-flow error. The NumPy samples are also converted to tensors before being passed into the tf.function, and train_step now returns the loss so it can be printed:
import tensorflow as tf  # 2.0.0-alpha0
import numpy as np

x_data = np.random.randn(5, 2)
w_real = 0.7   # coefficients
b_real = -0.2  # global bias
noise = np.random.randn(5, 2) * 0.01  # level of noise
y_data = w_real * x_data + b_real + noise

class SimpleRegressionNN(tf.keras.Model):

    def __init__(self):
        super(SimpleRegressionNN, self).__init__()
        self.output_layer = tf.keras.layers.Dense(1, input_shape=(2,))

    def call(self, data_input):
        result = self.output_layer(data_input)
        return result

reg_loss = tf.keras.losses.MeanSquaredError()
reg_optimiser = tf.keras.optimizers.SGD(0.1)
nn_regressor = SimpleRegressionNN()

@tf.function
def train_step(x_sample, y_sample):
    with tf.GradientTape() as tape:
        predictions = nn_regressor(x_sample)
        loss = reg_loss(y_sample, predictions)
    gradients = tape.gradient(loss, nn_regressor.trainable_variables)  # had to indent this!
    reg_optimiser.apply_gradients(zip(gradients, nn_regressor.trainable_variables))
    return loss

for x_point, y_point in zip(x_data, y_data):  # batch of 1
    x_point, y_point = tf.convert_to_tensor([x_point]), tf.convert_to_tensor([y_point])
    mse = train_step(x_sample=x_point, y_sample=y_point)
    print("MSE: {}".format(mse.numpy()))
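For comparison, here is how the same kind of training loop looked in the old TensorFlow 1.x style. This is a fragment; X, Y, costF, learning_rate, and the training data are assumed to be defined elsewhere: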
...
# set up variables for the weights
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
...
# Define the operation that will be called on each iteration
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(costF)
# set up a session
sess = tf.Session()
# initialize all variables
init = tf.global_variables_initializer()
# execute the session
sess.run(init)
# Loop through the training data
for epoch in range(training_epochs):
    for (x, y) in zip(x_train, y_train):
        # execute the session
        sess.run(train_op, feed_dict={X: x, Y: y})
# get values of the final weights by executing the session
w_val_0 = sess.run(w0)
w_val_1 = sess.run(w1)
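In TensorFlow 2 there are no sessions or placeholders; the equivalent model is trained eagerly with tf.GradientTape: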
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

learning_rate = 0.01
# steps of looping through all your data to update the parameters
training_epochs = 100

# the training set
x_train = np.linspace(0, 10, 100)
y_train = x_train + np.random.normal(0, 1, 100)

w0 = tf.Variable(0.)
w1 = tf.Variable(0.)

def h(x):
    y = w1 * x + w0
    return y

def squared_error(y_pred, y_true):
    return tf.reduce_mean(tf.square(y_pred - y_true))

# train model
for epoch in range(training_epochs):
    with tf.GradientTape() as tape:
        y_predicted = h(x_train)
        costF = squared_error(y_predicted, y_train)
    # get gradients
    gradients = tape.gradient(costF, [w1, w0])
    # compute and adjust weights
    w1.assign_sub(gradients[0] * learning_rate)
    w0.assign_sub(gradients[1] * learning_rate)

plt.scatter(x_train, y_train)
# plot the best fit line
plt.plot(x_train, h(x_train), 'r')
plt.show()
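The same pattern extends beyond a straight line. In the next example, the weight vector holds one coefficient per power of x, and the model fits a degree-5 polynomial: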
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

learning_rate = 0.05
training_epochs = 100

# the training set
x_train = np.linspace(-1, 1, 101)

# Set up raw output data based on a degree 5 polynomial
num_coeffs = 6
trY_coeffs = [1, 2, 3, 4, 5, 6]
y_train = 0
for i in range(num_coeffs):
    y_train += trY_coeffs[i] * np.power(x_train, i)
# Add some noise
y_train += np.random.randn(*x_train.shape) * 1.5

# Set up the weight vector to all zeros
w = tf.Variable([0.] * num_coeffs, name="parameters")

# our model function:
# h(x) = w5*x^5 + w4*x^4 + w3*x^3 + w2*x^2 + w1*x + w0
def h(x):
    y = 0
    for i in range(num_coeffs):
        y += w[i] * pow(x, i)
    return y

# cost function
def squared_error(y_pred, y_true):
    return tf.reduce_mean(tf.square(y_pred - y_true))

# train model
for epoch in range(training_epochs):
    with tf.GradientTape() as tape:
        y_predicted = h(x_train)
        costF = squared_error(y_predicted, y_train)
    # get gradients
    gradients = tape.gradient(costF, w)
    # compute and adjust weights
    w.assign_sub(gradients * learning_rate)

plt.scatter(x_train, y_train)
# plot the best fit line
plt.plot(x_train, h(x_train), 'r')
plt.show()
In this tutorial we are going to write a linear regression example from scratch: we will write our own model for the linear regression problem. In this section we will show you how to write your own Linear Regression model in TensorFlow 2. You will learn to develop the model, generate data, and train and validate the model.
First, install TensorFlow 2.x on your computer, as we will use TensorFlow 2 to develop the program. You can check our tutorial Install TensorFlow 2.3.0 on Google Colab if TensorFlow is not installed on your computer. Here are the libraries you should import in your program:
# Import required libraries
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
Now we will define our Linear Regression model with the help of the following code:
# Define a Linear model
class LinearModel(object):
    def __init__(self):
        self.W = tf.Variable(12.0)
        self.b = tf.Variable(-6.1)

    def __call__(self, inputs):
        return self.W * inputs + self.b
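As a quick sanity check (this snippet is our addition, not part of the original tutorial), the model can be called directly on a tensor; with the initial values W = 12.0 and b = -6.1 it computes 12.0 * x - 6.1:

model = LinearModel()
print(model(tf.constant([1.0, 2.0])).numpy())  # [ 5.9 17.9]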
Now we will define the cost function for our model. Here is the code of the cost function:
# Define loss function
def compute_loss(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred))
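For example (a quick check of our own, not from the tutorial), predictions that are each off by 0.5 give a mean squared error of 0.25:

compute_loss(tf.constant([1.0, 2.0]), tf.constant([1.5, 2.5]))  # tf.Tensor(0.25)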
Now we will define the true weight and bias from which the training data will be generated:
# Define the weight and bias
weight = 2.5
bias = 1.0
The next step is to generate or load the data for training the model. If you are working on a commercial project, you will have to write a program for pre-processing the data. In this example we generate the data with the following code:
# Generate Data
data = 100
inputs = tf.random.normal(shape=[data])
noise = tf.random.normal(shape=[data])
outputs = inputs * weight + bias + noise
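With the model, loss function, and data in place, a minimal training loop can fit W and b toward the true values of 2.5 and 1.0. This is a sketch of the natural next step; the tutorial's own training loop is not shown in this excerpt:

# Train the model with plain gradient descent (illustrative sketch)
model = LinearModel()
learning_rate = 0.1
for epoch in range(50):
    with tf.GradientTape() as tape:
        loss = compute_loss(outputs, model(inputs))
    # gradients of the loss with respect to the trainable variables
    dW, db = tape.gradient(loss, [model.W, model.b])
    model.W.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)

print(model.W.numpy(), model.b.numpy())  # should approach 2.5 and 1.0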