#!/usr/bin/python3
# Linear regression on odometer readings using the TensorFlow 1.x graph API.
import pandas as pd
import tensorflow as tf
import numpy
import os

# silence TensorFlow's informational log output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

rnd = numpy.random

# hyperparameters
learning_rate = 0.01
training_epochs = 2000
chkpoint = 250          # print progress every `chkpoint` epochs

train_df = pd.read_csv("odometer.csv")

# placeholders for the input (date) and target (miles)
X = tf.placeholder("float")
Y = tf.placeholder("float")

# trainable parameters, randomly initialized
W = tf.Variable(rnd.randn(), name="weight")
b = tf.Variable(rnd.randn(), name="bias")

# model: Y = X*W + b
pred = tf.add(tf.multiply(X, W), b)

# mean squared error over the training set
total = len(train_df.index)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * total)

# normalize the training dates to the range [0, 1]
nn_offset = int(train_df['date'].min())
nn_div = int(train_df['date'].max() - train_df['date'].min())
print("norm_off=", nn_offset)
print("norm_mult=", nn_div)
train_df['date'] -= nn_offset
train_df['date'] /= nn_div

opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

# tensorflow session
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        # one gradient-descent step per training row
        for ix, row in train_df.iterrows():
            sess.run(opt, feed_dict={X: row['date'], Y: row['miles']})
        if epoch % chkpoint == 0:
            c = sess.run(cost, feed_dict={X: train_df['date'], Y: train_df['miles']})
            print("W=", sess.run(W), "b=", sess.run(b), "cost=", c)
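
    # Sketch (not in the original listing): after training, evaluate the fitted
    # line for a new raw 'date' value. `query_date` is a hypothetical example
    # value; it must be in the same units as the 'date' column of odometer.csv,
    # and it is normalized with the same offset/divisor used for the training set.
    query_date = nn_offset + nn_div   # hypothetical: one normalization span past the earliest sample
    query_norm = (query_date - nn_offset) / nn_div
    predicted_miles = sess.run(pred, feed_dict={X: query_norm})
    print("predicted miles at date", query_date, "=", predicted_miles)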