# neuralnetbackprop.py
# uses Python version 2.7.8
import random
import math
# ------------------------------------
def show_data(matrix, num_first_rows):
    """Print the first rows of matrix (fixed-width, 1 decimal) then the last row.

    NOTE(review): prints num_first_rows - 1 leading rows, one fewer than the
    parameter name suggests — confirm whether the off-by-one is intended.
    """
    #for i in range(len(matrix)):
    for i in range(0, num_first_rows-1):
        print "[" + str(i).rjust(2) + "]",
        for j in range(len(matrix[i])):
            print str("%.1f" % matrix[i][j]).rjust(5),
        print "\n",
    print "........"  # separator between the head rows and the final row
    last_row = len(matrix) - 1
    print "[" + str(last_row).rjust(2) + "]",
    for j in range(len(matrix[last_row])):
        print str("%.1f" % matrix[last_row][j]).rjust(5),
    print "\n"
def show_vector(vector):
    """Print vector 8 values per line, 4 decimals each.

    Non-negative values get a leading space so columns line up with the
    minus sign of negative values.
    """
    for i in range(len(vector)):
        if i % 8 == 0: # 8 columns
            print "\n",
        if vector[i] >= 0.0:
            print '',  # pad to align with negative values
        print "%.4f" % vector[i], # 4 decimals
    print "\n"
# ------------------------------------
class NeuralNetwork:
    """Fully-connected input-hidden-output neural network.

    Hidden layer uses tanh activation; output layer uses softmax.
    Training is incremental (per-row) back-propagation with momentum.
    The flat weight ordering used by set_weights / get_weights /
    initialize_weights is: input-hidden weights (row-major), hidden
    biases, hidden-output weights (row-major), output biases.
    """

    def __init__(self, num_input, num_hidden, num_output):
        # Layer sizes.
        self.num_input = num_input
        self.num_hidden = num_hidden
        self.num_output = num_output
        # Node values and learnable parameters.
        self.inputs = [0 for i in range(num_input)]
        self.ih_weights = self.make_matrix(num_input, num_hidden)
        self.h_biases = [0 for i in range(num_hidden)]
        self.h_outputs = [0 for i in range(num_hidden)]
        self.ho_weights = self.make_matrix(num_hidden, num_output)
        self.o_biases = [0 for i in range(num_output)]
        self.outputs = [0 for i in range(num_output)]
        # Private generator (seed 0) so multiple instances don't share
        # global RNG state; results are reproducible.
        self.rnd = random.Random(0)
        self.initialize_weights()

    def make_matrix(self, rows, cols):
        """Return a rows x cols matrix of zeros (each row a distinct list)."""
        result = [[0 for j in range(cols)] for i in range(rows)]
        return result

    def _num_weights(self):
        """Total count of weights + biases in the flat representation."""
        return ((self.num_input * self.num_hidden) + self.num_hidden +
                (self.num_hidden * self.num_output) + self.num_output)

    def set_weights(self, weights):
        """Copy a flat weight list into the network (see class docstring
        for the ordering)."""
        k = 0
        for i in range(self.num_input):
            for j in range(self.num_hidden):
                self.ih_weights[i][j] = weights[k]
                k += 1
        for i in range(self.num_hidden):
            self.h_biases[i] = weights[k]
            k += 1
        for i in range(self.num_hidden):
            for j in range(self.num_output):
                self.ho_weights[i][j] = weights[k]
                k += 1
        for i in range(self.num_output):
            self.o_biases[i] = weights[k]
            k += 1

    def get_weights(self):
        """Return all weights and biases as one flat list (inverse of
        set_weights)."""
        result = [0 for i in range(self._num_weights())]
        k = 0
        for i in range(self.num_input):
            for j in range(self.num_hidden):
                result[k] = self.ih_weights[i][j]
                k += 1
        for i in range(self.num_hidden):
            result[k] = self.h_biases[i]
            k += 1
        for i in range(self.num_hidden):
            for j in range(self.num_output):
                result[k] = self.ho_weights[i][j]
                k += 1
        for i in range(self.num_output):
            result[k] = self.o_biases[i]
            k += 1
        return result

    def initialize_weights(self):
        """Set every weight/bias to a small uniform random value in
        [-0.01, 0.01)."""
        lo = -0.01
        hi = 0.01
        wts = [(hi - lo) * self.rnd.random() + lo
               for _ in range(self._num_weights())]
        self.set_weights(wts)

    def compute_outputs(self, x_values):
        """Feed x_values forward through the network.

        Stores inputs, hidden outputs and final outputs on self, and
        returns a copy of the softmax output vector.
        """
        h_sums = [0 for i in range(self.num_hidden)]
        o_sums = [0 for i in range(self.num_output)]
        for i in range(len(x_values)):
            self.inputs[i] = x_values[i]
        for j in range(self.num_hidden):
            for i in range(self.num_input):
                h_sums[j] += (self.inputs[i] * self.ih_weights[i][j])
        for i in range(self.num_hidden):
            h_sums[i] += self.h_biases[i]
        for i in range(self.num_hidden):
            self.h_outputs[i] = self.hypertan(h_sums[i])
        for j in range(self.num_output):
            for i in range(self.num_hidden):
                o_sums[j] += (self.h_outputs[i] * self.ho_weights[i][j])
        for i in range(self.num_output):
            o_sums[i] += self.o_biases[i]
        soft_out = self.softmax(o_sums)
        for i in range(self.num_output):
            self.outputs[i] = soft_out[i]
        # Return a copy so callers can't mutate self.outputs.
        result = [0 for i in range(self.num_output)]
        for i in range(self.num_output):
            result[i] = self.outputs[i]
        return result

    def hypertan(self, x):
        """tanh(x), clamped to +/-1 for |x| > 20 where tanh saturates."""
        if x < -20.0:
            return -1.0
        elif x > 20.0:
            return 1.0
        else:
            return math.tanh(x)

    def softmaxnaive(self, o_sums):
        """Naive softmax (no max shift) — kept for reference; can overflow
        for large inputs. Prefer softmax()."""
        div = 0
        for i in range(len(o_sums)):
            div = div + math.exp(o_sums[i])
        result = [0 for i in range(len(o_sums))]
        for i in range(len(o_sums)):
            result[i] = math.exp(o_sums[i]) / div
        return result

    def softmax(self, o_sums):
        """Numerically stable softmax: shift by max(o_sums) before exp."""
        m = max(o_sums)
        scale = 0
        for i in range(len(o_sums)):
            scale = scale + (math.exp(o_sums[i] - m))
        result = [0 for i in range(len(o_sums))]
        for i in range(len(o_sums)):
            result[i] = math.exp(o_sums[i] - m) / scale
        return result

    def train(self, train_data, max_epochs, learn_rate, momentum):
        """Train with incremental back-propagation; return final weights.

        Each row of train_data holds num_input x-values followed by
        num_output target values. Rows are visited in a fresh random
        order each epoch.
        """
        o_grads = [0 for i in range(self.num_output)]  # output gradients
        h_grads = [0 for i in range(self.num_hidden)]  # hidden gradients
        # Previous deltas for the momentum term.
        # BUG FIX: these were sized from module-level globals num_input /
        # num_hidden / num_output instead of self.*, which breaks (or
        # silently mis-sizes) any network whose dimensions differ from
        # the script's globals.
        ih_prev_weights_delta = self.make_matrix(self.num_input, self.num_hidden)
        h_prev_biases_delta = [0 for i in range(self.num_hidden)]
        ho_prev_weights_delta = self.make_matrix(self.num_hidden, self.num_output)
        o_prev_biases_delta = [0 for i in range(self.num_output)]
        epoch = 0
        x_values = [0 for i in range(self.num_input)]
        t_values = [0 for i in range(self.num_output)]
        sequence = [i for i in range(len(train_data))]
        while epoch < max_epochs:
            self.rnd.shuffle(sequence)  # random visit order each epoch
            for ii in range(len(train_data)):
                idx = sequence[ii]
                for j in range(self.num_input):  # peel off x_values
                    x_values[j] = train_data[idx][j]
                for j in range(self.num_output):  # peel off t_values
                    t_values[j] = train_data[idx][j + self.num_input]
                self.compute_outputs(x_values)  # outputs stored internally
                # 1. output-node gradients (softmax derivative * error)
                for i in range(self.num_output):
                    derivative = (1 - self.outputs[i]) * self.outputs[i]
                    o_grads[i] = derivative * (t_values[i] - self.outputs[i])
                # 2. hidden-node gradients (tanh derivative * downstream sum)
                for i in range(self.num_hidden):
                    derivative = (1 - self.h_outputs[i]) * (1 + self.h_outputs[i])
                    total = 0  # renamed from 'sum' (shadowed the builtin)
                    for j in range(self.num_output):
                        total += o_grads[j] * self.ho_weights[i][j]
                    h_grads[i] = derivative * total
                # 3a. update input-hidden weights
                for i in range(self.num_input):
                    for j in range(self.num_hidden):
                        delta = learn_rate * h_grads[j] * self.inputs[i]
                        self.ih_weights[i][j] += delta
                        self.ih_weights[i][j] += momentum * ih_prev_weights_delta[i][j]
                        ih_prev_weights_delta[i][j] = delta  # save for momentum
                # 3b. update hidden biases
                for i in range(self.num_hidden):
                    delta = learn_rate * h_grads[i]
                    self.h_biases[i] += delta
                    self.h_biases[i] += momentum * h_prev_biases_delta[i]
                    h_prev_biases_delta[i] = delta
                # 4a. update hidden-output weights
                for i in range(self.num_hidden):
                    for j in range(self.num_output):
                        delta = learn_rate * o_grads[j] * self.h_outputs[i]
                        self.ho_weights[i][j] += delta
                        self.ho_weights[i][j] += momentum * ho_prev_weights_delta[i][j]
                        ho_prev_weights_delta[i][j] = delta
                # 4b. update output biases
                for i in range(self.num_output):
                    delta = learn_rate * o_grads[i]
                    self.o_biases[i] += delta
                    self.o_biases[i] += momentum * o_prev_biases_delta[i]
                    o_prev_biases_delta[i] = delta
            epoch += 1
        result = self.get_weights()
        return result

    def accuracy(self, data):
        """Fraction of rows where the max-probability output index hits the
        1.0 entry of the (one-hot) target."""
        num_correct = 0
        num_wrong = 0
        x_values = [0 for i in range(self.num_input)]
        t_values = [0 for i in range(self.num_output)]
        for i in range(len(data)):
            for j in range(self.num_input):  # peel off x_values
                x_values[j] = data[i][j]
            for j in range(self.num_output):  # peel off t_values
                t_values[j] = data[i][j + self.num_input]
            y_values = self.compute_outputs(x_values)
            max_index = y_values.index(max(y_values))
            if t_values[max_index] == 1.0:
                num_correct += 1
            else:
                num_wrong += 1
        return (num_correct * 1.0) / (num_correct + num_wrong)
# ------------------------------------
# Demo driver: iris-like species prediction from encoded color + petal data.
print "\nBegin neural network using Python demo"
print "\nGoal is to predict species from color, petal length, petal width \n"
print "The 30-item raw data looks like: \n"
print "[0] blue, 1.4, 0.3, setosa"
print "[1] pink, 4.9, 1.5, versicolor"
print "[2] teal, 5.6, 1.8, virginica"
print ". . ."
print "[29] pink, 5.9, 1.5, virginica"
# Encoding: cols 0-1 = color (1,0 / 0,1 / -1,-1), cols 2-3 = petal length
# and width, cols 4-6 = 1-of-3 species target.
train_data = ([[0 for j in range(7)]
               for i in range(24)]) # 24 rows, 7 cols
train_data[0] = [ 1, 0, 1.4, 0.3, 1, 0, 0 ]
train_data[1] = [ 0, 1, 4.9, 1.5, 0, 1, 0 ]
train_data[2] = [ -1, -1, 5.6, 1.8, 0, 0, 1 ]
train_data[3] = [ -1, -1, 6.1, 2.5, 0, 0, 1 ]
train_data[4] = [ 1, 0, 1.3, 0.2, 1, 0, 0 ]
train_data[5] = [ 0, 1, 1.4, 0.2, 1, 0, 0 ]
train_data[6] = [ 1, 0, 6.6, 2.1, 0, 0, 1 ]
train_data[7] = [ 0, 1, 3.3, 1.0, 0, 1, 0 ]
train_data[8] = [ -1, -1, 1.7, 0.4, 1, 0, 0 ]
# NOTE(review): row 9's target [0, 1, 1] is not one-hot — verify the data.
train_data[9] = [ 0, 1, 1.5, 0.1, 0, 1, 1 ]
train_data[10] = [ 0, 1, 1.4, 0.2, 1, 0, 0 ]
train_data[11] = [ 0, 1, 4.5, 1.5, 0, 1, 0 ]
train_data[12] = [ 1, 0, 1.4, 0.2, 1, 0, 0 ]
train_data[13] = [ -1, -1, 5.1, 1.9, 0, 0, 1 ]
train_data[14] = [ 1, 0, 6.0, 2.5, 0, 0, 1 ]
train_data[15] = [ 1, 0, 3.9, 1.4, 0, 1, 0 ]
train_data[16] = [ 0, 1, 4.7, 1.4, 0, 1, 0 ]
train_data[17] = [ -1, -1, 4.6, 1.5, 0, 1, 0 ]
train_data[18] = [ -1, -1, 4.5, 1.7, 0, 0, 1 ]
train_data[19] = [ 0, 1, 4.5, 1.3, 0, 1, 0 ]
train_data[20] = [ 1, 0, 1.5, 0.2, 1, 0, 0 ]
train_data[21] = [ 0, 1, 5.8, 2.2, 0, 0, 1 ]
train_data[22] = [ 0, 1, 4.0, 1.3, 0, 1, 0 ]
train_data[23] = [ -1, -1, 5.8, 1.8, 0, 0, 1 ]
test_data = ([[0 for j in range(7)]
              for i in range(6)]) # 6 rows, 7 cols
test_data[0] = [ 1, 0, 1.5, 0.2, 1, 0, 0 ]
test_data[1] = [ -1, -1, 5.9, 2.1, 0, 0, 1 ]
test_data[2] = [ 0, 1, 1.4, 0.2, 1, 0, 0 ]
test_data[3] = [ 0, 1, 4.7, 1.6, 0, 1, 0 ]
test_data[4] = [ 1, 0, 4.6, 1.3, 0, 1, 0 ]
test_data[5] = [ 1, 0, 6.3, 1.8, 0, 0, 1 ]
print "\nFirst few lines of encoded training data are: \n"
show_data(train_data, 4)
print "\nThe encoded test data is: \n"
show_data(test_data, 5)
print "\nCreating a 4-input, 5-hidden, 3-output neural network"
print "Using tanh and softmax activations \n"
# Network dimensions: 4 encoded predictors, 3 species classes.
num_input = 4
num_hidden = 5
num_output = 3
nn = NeuralNetwork(num_input, num_hidden, num_output)
# Deliberately modest hyperparameters so the demo finishes quickly.
max_epochs = 70 # artificially small
learn_rate = 0.08 # artificially large
momentum = 0.01
print "Setting max_epochs = " + str(max_epochs)
print "Setting learn_rate = " + str(learn_rate)
print "Setting momentum = " + str(momentum)
print "\nBeginning training using back-propagation"
weights = nn.train(train_data, max_epochs, learn_rate, momentum)
print "Training complete \n"
print "Final neural network weights and bias values:"
show_vector(weights)
print "Model accuracy on training data =",
acc_train = nn.accuracy(train_data)
print "%.4f" % acc_train
print "Model accuracy on test data =",
acc_test = nn.accuracy(test_data)
print "%.4f" % acc_test
print "\nEnd back-prop demo \n"
# Internship Program
# Monday, July 4, 2016
# Prediction using a neural network in Python
# neuralnetbackprop (Weather Forecasting using a neural network)
# uses Python version 2.7.8
import random
import math
import csv, sys
from csv import DictReader
import sys
from scipy import optimize
from StringIO import StringIO
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# ------------------------------------
def show_data(matrix, num_first_rows):
    """Print the first rows of matrix (raw values, right-justified) then the
    last row.

    NOTE(review): prints num_first_rows - 1 leading rows, one fewer than the
    parameter name suggests — confirm whether the off-by-one is intended.
    """
    #for i in range(len(matrix)):
    for i in range(0, num_first_rows-1):
        print "[" + str(i).rjust(2) + "]",
        for j in range(len(matrix[i])):
            print str( matrix[i][j]).rjust(5),
        print "\n",
    print "........"  # separator between the head rows and the final row
    last_row = len(matrix) - 1
    print "[" + str(last_row).rjust(2) + "]",
    for j in range(len(matrix[last_row])):
        print str( matrix[last_row][j]).rjust(5),
    print "\n"
def show_vector(vector):
    """Print vector 8 values per line, 4 decimals each.

    Non-negative values get a leading space so columns line up with the
    minus sign of negative values.
    """
    for i in range(len(vector)):
        if i % 8 == 0: # 8 columns
            print "\n",
        if vector[i] >= 0.0:
            print '',  # pad to align with negative values
        print "%.4f" % vector[i], # 4 decimals
    print "\n"
# ------------------------------------
class NeuralNetwork:
    """Fully-connected input-hidden-output neural network (weather variant).

    Hidden layer uses tanh activation; output layer uses softmax.
    Inputs may arrive as strings — compute_outputs converts them with
    float() before use. The flat weight ordering used by set_weights /
    get_weights / initialize_weights is: input-hidden weights (row-major),
    hidden biases, hidden-output weights (row-major), output biases.
    """

    def __init__(self, num_input, num_hidden, num_output):
        # Layer sizes.
        self.num_input = num_input
        self.num_hidden = num_hidden
        self.num_output = num_output
        # Node values and learnable parameters.
        self.inputs = [0 for i in range(num_input)]
        self.ih_weights = self.make_matrix(num_input, num_hidden)
        self.h_biases = [0 for i in range(num_hidden)]
        self.h_outputs = [0 for i in range(num_hidden)]
        self.ho_weights = self.make_matrix(num_hidden, num_output)
        self.o_biases = [0 for i in range(num_output)]
        self.outputs = [0 for i in range(num_output)]
        # Private generator (seed 0) so multiple instances don't share
        # global RNG state; results are reproducible.
        self.rnd = random.Random(0)
        self.initialize_weights()

    def make_matrix(self, rows, cols):
        """Return a rows x cols matrix of zeros (each row a distinct list)."""
        result = [[0 for j in range(cols)] for i in range(rows)]
        return result

    def _num_weights(self):
        """Total count of weights + biases in the flat representation."""
        return ((self.num_input * self.num_hidden) + self.num_hidden +
                (self.num_hidden * self.num_output) + self.num_output)

    def set_weights(self, weights):
        """Copy a flat weight list into the network (see class docstring
        for the ordering)."""
        k = 0
        for i in range(self.num_input):
            for j in range(self.num_hidden):
                self.ih_weights[i][j] = weights[k]
                k += 1
        for i in range(self.num_hidden):
            self.h_biases[i] = weights[k]
            k += 1
        for i in range(self.num_hidden):
            for j in range(self.num_output):
                self.ho_weights[i][j] = weights[k]
                k += 1
        for i in range(self.num_output):
            self.o_biases[i] = weights[k]
            k += 1

    def get_weights(self):
        """Return all weights and biases as one flat list (inverse of
        set_weights)."""
        result = [0 for i in range(self._num_weights())]
        k = 0
        for i in range(self.num_input):
            for j in range(self.num_hidden):
                result[k] = self.ih_weights[i][j]
                k += 1
        for i in range(self.num_hidden):
            result[k] = self.h_biases[i]
            k += 1
        for i in range(self.num_hidden):
            for j in range(self.num_output):
                result[k] = self.ho_weights[i][j]
                k += 1
        for i in range(self.num_output):
            result[k] = self.o_biases[i]
            k += 1
        return result

    def initialize_weights(self):
        """Set every weight/bias to a small uniform random value in
        [-0.01, 0.01)."""
        lo = -0.01
        hi = 0.01
        wts = [(hi - lo) * self.rnd.random() + lo
               for _ in range(self._num_weights())]
        self.set_weights(wts)

    def compute_outputs(self, x_values):
        """Feed x_values forward through the network.

        Stores inputs, hidden outputs and final outputs on self, and
        returns a copy of the softmax output vector. x_values may be
        numbers or numeric strings (converted with float()).
        """
        h_sums = [0 for i in range(self.num_hidden)]
        o_sums = [0 for i in range(self.num_output)]
        for i in range(len(x_values)):
            self.inputs[i] = x_values[i]
        for j in range(self.num_hidden):
            for i in range(self.num_input):
                # float() lets inputs arrive as strings (e.g. parsed CSV).
                h_sums[j] += (float(self.inputs[i]) * self.ih_weights[i][j])
        for i in range(self.num_hidden):
            h_sums[i] += self.h_biases[i]
        for i in range(self.num_hidden):
            self.h_outputs[i] = self.hypertan(h_sums[i])
        for j in range(self.num_output):
            for i in range(self.num_hidden):
                o_sums[j] += (self.h_outputs[i] * self.ho_weights[i][j])
        for i in range(self.num_output):
            o_sums[i] += self.o_biases[i]
        soft_out = self.softmax(o_sums)
        for i in range(self.num_output):
            self.outputs[i] = soft_out[i]
        # Return a copy so callers can't mutate self.outputs.
        result = [0 for i in range(self.num_output)]
        for i in range(self.num_output):
            result[i] = self.outputs[i]
        return result

    def hypertan(self, x):
        """tanh(x), clamped to +/-1 for |x| > 20 where tanh saturates."""
        if x < -20.0:
            return -1.0
        elif x > 20.0:
            return 1.0
        else:
            return math.tanh(x)

    def softmaxnaive(self, o_sums):
        """Naive softmax (no max shift) — kept for reference; can overflow
        for large inputs. Prefer softmax()."""
        div = 0
        for i in range(len(o_sums)):
            div = div + math.exp(o_sums[i])
        result = [0 for i in range(len(o_sums))]
        for i in range(len(o_sums)):
            result[i] = math.exp(o_sums[i]) / div
        return result

    def softmax(self, o_sums):
        """Numerically stable softmax: shift by max(o_sums) before exp."""
        m = max(o_sums)
        scale = 0
        for i in range(len(o_sums)):
            scale = scale + (math.exp(o_sums[i] - m))
        result = [0 for i in range(len(o_sums))]
        for i in range(len(o_sums)):
            result[i] = math.exp(o_sums[i] - m) / scale
        return result

    def train(self, train_data, max_epochs, learn_rate, momentum):
        """Train with incremental back-propagation; return final weights.

        Each row of train_data holds num_input x-values followed by
        num_output target values (numbers or numeric strings). Rows are
        visited in a fresh random order each epoch.
        """
        o_grads = [0 for i in range(self.num_output)]  # output gradients
        h_grads = [0 for i in range(self.num_hidden)]  # hidden gradients
        # Previous deltas for the momentum term.
        # BUG FIX: these were sized from module-level globals num_input /
        # num_hidden / num_output instead of self.*, which breaks (or
        # silently mis-sizes) any network whose dimensions differ from
        # the script's globals.
        ih_prev_weights_delta = self.make_matrix(self.num_input, self.num_hidden)
        h_prev_biases_delta = [0 for i in range(self.num_hidden)]
        ho_prev_weights_delta = self.make_matrix(self.num_hidden, self.num_output)
        o_prev_biases_delta = [0 for i in range(self.num_output)]
        epoch = 0
        x_values = [0 for i in range(self.num_input)]
        t_values = [0 for i in range(self.num_output)]
        sequence = [i for i in range(len(train_data))]
        while epoch < max_epochs:
            self.rnd.shuffle(sequence)  # random visit order each epoch
            for ii in range(len(train_data)):
                idx = sequence[ii]
                for j in range(self.num_input):  # peel off x_values
                    x_values[j] = train_data[idx][j]
                for j in range(self.num_output):  # peel off t_values
                    t_values[j] = train_data[idx][j + self.num_input]
                self.compute_outputs(x_values)  # outputs stored internally
                # 1. output-node gradients (softmax derivative * error)
                for i in range(self.num_output):
                    derivative = (1 - self.outputs[i]) * self.outputs[i]
                    o_grads[i] = derivative * (float(t_values[i]) - float(self.outputs[i]))
                # 2. hidden-node gradients (tanh derivative * downstream sum)
                for i in range(self.num_hidden):
                    derivative = (1 - self.h_outputs[i]) * (1 + self.h_outputs[i])
                    total = 0  # renamed from 'sum' (shadowed the builtin)
                    for j in range(self.num_output):
                        total += o_grads[j] * self.ho_weights[i][j]
                    h_grads[i] = derivative * total
                # 3a. update input-hidden weights
                for i in range(self.num_input):
                    for j in range(self.num_hidden):
                        delta = learn_rate * h_grads[j] * float(self.inputs[i])
                        self.ih_weights[i][j] += delta
                        self.ih_weights[i][j] += momentum * ih_prev_weights_delta[i][j]
                        ih_prev_weights_delta[i][j] = delta  # save for momentum
                # 3b. update hidden biases
                for i in range(self.num_hidden):
                    delta = learn_rate * h_grads[i]
                    self.h_biases[i] += delta
                    self.h_biases[i] += momentum * h_prev_biases_delta[i]
                    h_prev_biases_delta[i] = delta
                # 4a. update hidden-output weights
                for i in range(self.num_hidden):
                    for j in range(self.num_output):
                        delta = learn_rate * o_grads[j] * self.h_outputs[i]
                        self.ho_weights[i][j] += delta
                        self.ho_weights[i][j] += momentum * ho_prev_weights_delta[i][j]
                        ho_prev_weights_delta[i][j] = delta
                # 4b. update output biases
                for i in range(self.num_output):
                    delta = learn_rate * o_grads[i]
                    self.o_biases[i] += delta
                    self.o_biases[i] += momentum * o_prev_biases_delta[i]
                    o_prev_biases_delta[i] = delta
            epoch += 1
        result = self.get_weights()
        return result

    def accuracy(self, data):
        """Fraction of rows 'correct' under this demo's band rule.

        A row counts as correct when the target value at the index of the
        max output lies strictly inside (0.15, 0.85).
        NOTE(review): unusual criterion for a regression-style target —
        confirm it matches the intended evaluation.
        """
        num_correct = 0
        num_wrong = 0
        x_values = [0 for i in range(self.num_input)]
        t_values = [0 for i in range(self.num_output)]
        for i in range(len(data)):
            for j in range(self.num_input):  # peel off x_values
                x_values[j] = data[i][j]
            for j in range(self.num_output):  # peel off t_values
                t_values[j] = data[i][j + self.num_input]
            y_values = self.compute_outputs(x_values)
            max_index = y_values.index(max(y_values))
            # BUG FIX (idiom): was bitwise '&' on booleans; 'and' is the
            # boolean conjunction (same result here, clearer intent).
            if (float(t_values[max_index]) < 0.85) and (float(t_values[max_index]) > 0.15):
                num_correct += 1
            else:
                num_wrong += 1
        return (num_correct * 1.0) / (num_correct + num_wrong)

    def result(self, data):
        """Return all computed outputs for data, concatenated as a single
        comma-terminated string (e.g. '0.5,0.5,')."""
        x_values = [0 for i in range(self.num_input)]
        t_values = [0 for i in range(self.num_output)]
        out = ""
        for i in range(len(data)):
            for j in range(self.num_input):  # peel off x_values
                x_values[j] = data[i][j]
            for j in range(self.num_output):  # peel off t_values
                t_values[j] = data[i][j + self.num_input]
            y_values = self.compute_outputs(x_values)
            # BUG FIX: the inner loop reused 'i', shadowing the row index.
            for k in range(len(y_values)):
                out += str(y_values[k]) + ","
        return out
# ------------------------------------
# Demo driver: wind-speed prediction from one-hot encoded station/year/month,
# followed by a 3D wireframe plot of columns a/b/c from datafile.csv.
print "\nNeural network using Python"
print "\nGoal is to predict Wind Data \n"
print "The 30-item raw data looks like: \n"
print "[0] Anuradhapura, 2006, Jan, 3.5"
print "[1] Anuradhapura, 2006, Feb, 3.5"
print "[2] Anuradhapura, 2006, Mar, 3.1"
print ". . ."
print "[29] Batticaloa, 2006, June, 6.0"
# Encoding per row: 36 one-hot-style predictor columns then 1 target value.
train_data = ([[0 for j in range(37)]
               for i in range(24)]) # 24 rows, 37 cols
train_data[0] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.13]
train_data[1] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.13]
train_data[2] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.11]
train_data[3] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.17]
train_data[4] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.3]
train_data[5] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.33]
train_data[6] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.36]
train_data[7] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.35]
train_data[8] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.32]
train_data[9] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.19]
train_data[10] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.09]
train_data[11] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.13]
train_data[12] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.21]
train_data[13] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.22]
train_data[14] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,1.1]
train_data[15] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.19]
train_data[16] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.29]
train_data[17] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.34]
train_data[18] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.49]
train_data[19] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.35]
train_data[20] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.39]
train_data[21] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.22]
train_data[22] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.15]
train_data[23] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.3]
test_data = ([[0 for j in range(37)]
              for i in range(12)]) # 12 rows, 37 cols
test_data[0] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,0,0,0,1,0.17]
test_data[1] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,0,0,1,0,0.13]
test_data[2] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,0,1,0,0,0.14]
test_data[3] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,1,0,0,0,0.38]
test_data[4] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,1,0,0,0,0,0.22]
test_data[5] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,1,0,0,0,0,0,0.32]
test_data[6] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,1,0,0,0,0,0,0,0.34]
test_data[7] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,1,0,0,0,0,0,0,0,0.29]
test_data[8] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,1,0,0,0,0,0,0,0,0,0.25]
test_data[9] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,1,0,0,0,0,0,0,0,0,0,0.12]
test_data[10] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,1,0,0,0,0,0,0,0,0,0,0,0.09]
test_data[11] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,1,0,0,0,0,0,0,0,0,0,0,0,0.13]
print "\nFirst few lines of encoded training data are: \n"
show_data(train_data, 4)
print "\nThe encoded test data is: \n"
show_data(test_data, 5)
print "\nCreating a 36-input, 37-hidden, 1-output neural network"
print "Using tanh and softmax activations \n"
# NOTE(review): with a single output node, softmax always yields 1.0 —
# confirm the intended output activation for this regression-style target.
num_input = 36
num_hidden = 37
num_output = 1
nn = NeuralNetwork(num_input, num_hidden, num_output)
# Deliberately modest hyperparameters so the demo finishes quickly.
max_epochs = 70 # artificially small
learn_rate = 0.08 # artificially large
momentum = 0.01
print "Setting max_epochs = " + str(max_epochs)
print "Setting learn_rate = " + str(learn_rate)
print "Setting momentum = " + str(momentum)
print "\nBeginning training using back-propagation"
weights = nn.train(train_data, max_epochs, learn_rate, momentum)
print "Training complete \n"
print "Final neural network weights and bias values:"
show_vector(weights)
print "Model accuracy on training data =",
acc_train = nn.accuracy(train_data)
print "%.4f" % acc_train
print "Model accuracy on test data =",
acc_test = nn.accuracy(test_data)
print "%.4f" % acc_test
print "---------------------------------------"
print "TESTING"
output = nn.result(test_data)
print output
# Read three columns (headers "a", "b", "c") from a local CSV and plot them.
# File is reopened per column so each DictReader starts at the top.
filename2= 'datafile.csv'
with open(filename2,'rb') as f:
    clm=[row["a"] for row in DictReader(f)]
x=[]
for i in clm:
    x.append(float(i))
with open(filename2,'rb') as f:
    clm1=[row["b"] for row in DictReader(f)]
y=[]
for i in clm1:
    y.append(float(i))
with open(filename2,'rb') as f:
    clm2=[row["c"] for row in DictReader(f)]
z=[]
for i in clm2:
    z.append(float(i))
#wireframe plot
fig = plt.figure()
ax1 = fig.add_subplot(111, projection='3d')
ax1.plot_wireframe(x,y,z)
plt.show()
#reader=csv.reader(f,delimiter=';')
#for row in reader:
#print row[0]
print "\nEnd back-prop demo \n"
# uses Python version 2.7.8
import random
import math
import csv, sys
from csv import DictReader
import sys
from scipy import optimize
from StringIO import StringIO
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# ------------------------------------
def show_data(matrix, num_first_rows):
    """Print the first rows of matrix (raw values, right-justified) then the
    last row.

    NOTE(review): prints num_first_rows - 1 leading rows, one fewer than the
    parameter name suggests — confirm whether the off-by-one is intended.
    """
    #for i in range(len(matrix)):
    for i in range(0, num_first_rows-1):
        print "[" + str(i).rjust(2) + "]",
        for j in range(len(matrix[i])):
            print str( matrix[i][j]).rjust(5),
        print "\n",
    print "........"  # separator between the head rows and the final row
    last_row = len(matrix) - 1
    print "[" + str(last_row).rjust(2) + "]",
    for j in range(len(matrix[last_row])):
        print str( matrix[last_row][j]).rjust(5),
    print "\n"
def show_vector(vector):
    """Print vector 8 values per line, 4 decimals each.

    Non-negative values get a leading space so columns line up with the
    minus sign of negative values.
    """
    for i in range(len(vector)):
        if i % 8 == 0: # 8 columns
            print "\n",
        if vector[i] >= 0.0:
            print '',  # pad to align with negative values
        print "%.4f" % vector[i], # 4 decimals
    print "\n"
# ------------------------------------
class NeuralNetwork:
def __init__(self, num_input, num_hidden, num_output):
    """Create the network and randomly initialize all weights and biases."""
    # Layer sizes.
    self.num_input = num_input
    self.num_hidden = num_hidden
    self.num_output = num_output
    # Node values and learnable parameters.
    self.inputs = [0 for i in range(num_input)]
    self.ih_weights = self.make_matrix(num_input, num_hidden)
    self.h_biases = [0 for i in range(num_hidden)]
    self.h_outputs = [0 for i in range(num_hidden)]
    self.ho_weights = self.make_matrix(num_hidden, num_output)
    self.o_biases = [0 for i in range(num_output)]
    self.outputs = [0 for i in range(num_output)]
    # random.seed(0) # hidden function is 'normal' approach
    self.rnd = random.Random(0) # allows multiple instances
    self.initialize_weights()
def make_matrix(self, rows, cols):
    """Return a zero-filled rows x cols matrix (each row a distinct list)."""
    return [[0] * cols for _ in range(rows)]
def set_weights(self, weights):
    """Copy a flat weight list into the network.

    Order: input-hidden weights (row-major), hidden biases, hidden-output
    weights (row-major), output biases — must match get_weights and
    initialize_weights.
    """
    k = 0  # read cursor into the flat weights list
    for i in range(self.num_input):
        for j in range(self.num_hidden):
            self.ih_weights[i][j] = weights[k]
            k += 1
    for i in range(self.num_hidden):
        self.h_biases[i] = weights[k]
        k += 1
    for i in range(self.num_hidden):
        for j in range(self.num_output):
            self.ho_weights[i][j] = weights[k]
            k += 1
    for i in range(self.num_output):
        self.o_biases[i] = weights[k]
        k += 1
def get_weights(self):
    """Return all weights and biases as one flat list (inverse of
    set_weights; same ordering)."""
    num_wts = ((self.num_input * self.num_hidden) + self.num_hidden +
               (self.num_hidden * self.num_output) + self.num_output)
    result = [0 for i in range(num_wts)]
    k = 0  # write cursor into the flat result list
    for i in range(self.num_input):
        for j in range(self.num_hidden):
            result[k] = self.ih_weights[i][j]
            k += 1
    for i in range(self.num_hidden):
        result[k] = self.h_biases[i]
        k += 1
    for i in range(self.num_hidden):
        for j in range(self.num_output):
            result[k] = self.ho_weights[i][j]
            k += 1
    for i in range(self.num_output):
        result[k] = self.o_biases[i]
        k += 1
    return result
def initialize_weights(self):
    """Set every weight/bias to a small uniform random value in
    [-0.01, 0.01), using the instance's seeded RNG for reproducibility."""
    num_wts = ((self.num_input * self.num_hidden) + self.num_hidden +
               (self.num_hidden * self.num_output) + self.num_output)
    wts = [0 for i in range(num_wts)]
    lo = -0.01
    hi = 0.01
    for i in range(len(wts)):
        wts[i] = (hi - lo) * self.rnd.random() + lo
    self.set_weights(wts)
def compute_outputs(self, x_values):
    """Feed x_values forward; store and return the softmax outputs.

    x_values may be numbers or numeric strings — each is passed through
    float() before use. Side effects: self.inputs, self.h_outputs and
    self.outputs are updated.
    """
    h_sums = [0 for i in range(self.num_hidden)]
    o_sums = [0 for i in range(self.num_output)]
    for i in range(len(x_values)):
        self.inputs[i] = x_values[i]
    for j in range(self.num_hidden):
        for i in range(self.num_input):
            # float() lets inputs arrive as strings (e.g. parsed CSV).
            h_sums[j] += (float(self.inputs[i]) * self.ih_weights[i][j])
    for i in range(self.num_hidden):
        h_sums[i] += self.h_biases[i]
    for i in range(self.num_hidden):
        self.h_outputs[i] = self.hypertan(h_sums[i])
    for j in range(self.num_output):
        for i in range(self.num_hidden):
            o_sums[j] += (self.h_outputs[i] * self.ho_weights[i][j])
    for i in range(self.num_output):
        o_sums[i] += self.o_biases[i]
    soft_out = self.softmax(o_sums)
    for i in range(self.num_output):
        self.outputs[i] = soft_out[i]
    # Return a copy so callers can't mutate self.outputs.
    result = [0 for i in range(self.num_output)]
    for i in range(self.num_output):
        result[i] = self.outputs[i]
    return result
def hypertan(self, x):
    """tanh(x), clamped to -1.0 / +1.0 outside [-20, 20] where tanh
    saturates anyway."""
    if -20.0 <= x <= 20.0:
        return math.tanh(x)
    return -1.0 if x < -20.0 else 1.0
def softmaxnaive(self, o_sums):
    """Naive softmax (no max shift); can overflow for large inputs —
    prefer softmax()."""
    exps = [math.exp(v) for v in o_sums]
    denom = sum(exps)
    return [e / denom for e in exps]
def softmax(self, o_sums):
    """Numerically stable softmax: subtract max(o_sums) before
    exponentiating so large inputs cannot overflow."""
    biggest = max(o_sums)
    exps = [math.exp(v - biggest) for v in o_sums]
    denom = sum(exps)
    return [e / denom for e in exps]
def train(self, train_data, max_epochs, learn_rate, momentum):
    """Train with incremental (online) back-propagation plus momentum.

    Each row of train_data holds num_input x-values followed by
    num_output target values.  Rows are visited in a fresh random order
    every epoch.  Returns the final flattened weight vector.
    """
    o_grads = [0 for i in range(self.num_output)]  # output-node gradients
    h_grads = [0 for i in range(self.num_hidden)]  # hidden-node gradients
    # previous deltas, kept for the momentum terms
    # BUG FIX: these two matrices were sized with the bare module-level
    # globals num_input/num_hidden/num_output instead of the instance's
    # own dimensions -- it only worked because the script happened to
    # define matching globals.
    ih_prev_weights_delta = self.make_matrix(self.num_input, self.num_hidden)
    h_prev_biases_delta = [0 for i in range(self.num_hidden)]
    ho_prev_weights_delta = self.make_matrix(self.num_hidden, self.num_output)
    o_prev_biases_delta = [0 for i in range(self.num_output)]
    epoch = 0
    x_values = [0 for i in range(self.num_input)]
    t_values = [0 for i in range(self.num_output)]
    sequence = [i for i in range(len(train_data))]
    while epoch < max_epochs:
        self.rnd.shuffle(sequence)  # visit training rows in random order
        for ii in range(len(train_data)):
            idx = sequence[ii]
            for j in range(self.num_input):  # peel off x_values
                x_values[j] = train_data[idx][j]
            for j in range(self.num_output):  # peel off t_values
                t_values[j] = train_data[idx][j + self.num_input]
            self.compute_outputs(x_values)  # outputs stored internally
            # --- update-weights (back-prop) section
            for i in range(self.num_output):  # 1. compute output gradients
                derivative = (1 - self.outputs[i]) * self.outputs[i]
                o_grads[i] = derivative * (float(t_values[i]) - float(self.outputs[i]))
            for i in range(self.num_hidden):  # 2. compute hidden gradients
                # tanh derivative: (1 - y) * (1 + y) == 1 - y*y
                derivative = (1 - self.h_outputs[i]) * (1 + self.h_outputs[i])
                grad_sum = 0  # renamed from 'sum' -- it shadowed the builtin
                for j in range(self.num_output):
                    grad_sum += o_grads[j] * self.ho_weights[i][j]
                h_grads[i] = derivative * grad_sum
            for i in range(self.num_input):  # 3a. update input-hidden weights
                for j in range(self.num_hidden):
                    delta = learn_rate * h_grads[j] * float(self.inputs[i])
                    self.ih_weights[i][j] += delta
                    self.ih_weights[i][j] += momentum * ih_prev_weights_delta[i][j]
                    ih_prev_weights_delta[i][j] = delta  # save for momentum
            for i in range(self.num_hidden):  # 3b. update hidden biases
                delta = learn_rate * h_grads[i]
                self.h_biases[i] += delta
                self.h_biases[i] += momentum * h_prev_biases_delta[i]
                h_prev_biases_delta[i] = delta  # save for momentum
            for i in range(self.num_hidden):  # 4a. update hidden-output weights
                for j in range(self.num_output):
                    delta = learn_rate * o_grads[j] * self.h_outputs[i]
                    self.ho_weights[i][j] += delta
                    self.ho_weights[i][j] += momentum * ho_prev_weights_delta[i][j]
                    ho_prev_weights_delta[i][j] = delta  # save for momentum
            for i in range(self.num_output):  # 4b. update output biases
                delta = learn_rate * o_grads[i]
                self.o_biases[i] += delta
                self.o_biases[i] += momentum * o_prev_biases_delta[i]
                o_prev_biases_delta[i] = delta  # save for momentum
            # --- end update-weights
        epoch += 1
    return self.get_weights()
def accuracy(self, data):
    """Return the fraction of rows of data judged 'correct'.

    A row counts as correct when the target value at the index of the
    largest output lies strictly inside (0.15, 0.85).
    NOTE(review): this is an unusual criterion -- it never compares the
    network's outputs to the targets directly; confirm it is intended.
    """
    num_correct = 0
    num_wrong = 0
    x_values = [0 for i in range(self.num_input)]
    t_values = [0 for i in range(self.num_output)]
    for row in range(len(data)):
        for j in range(self.num_input):  # peel off x_values
            x_values[j] = data[row][j]
        for j in range(self.num_output):  # peel off t_values
            t_values[j] = data[row][j + self.num_input]
        y_values = self.compute_outputs(x_values)
        max_index = y_values.index(max(y_values))
        # was: (t < 0.85) & (t > 0.15) -- bitwise '&' on bools; a chained
        # comparison gives the identical result with clearer intent
        if 0.15 < float(t_values[max_index]) < 0.85:
            num_correct += 1
        else:
            num_wrong += 1
    return (num_correct * 1.0) / (num_correct + num_wrong)
def result(self, data):
    """Run every row of data through the network and return all output
    values as one comma-separated string (with a trailing comma).

    Fixes over the original: the inner loop reused the outer loop
    variable 'i'; unused counters and an unused t_values peel-off were
    dead code; the string is now built with join instead of quadratic +=.
    """
    x_values = [0 for i in range(self.num_input)]
    pieces = []
    for row in range(len(data)):
        for j in range(self.num_input):  # peel off x_values
            x_values[j] = data[row][j]
        y_values = self.compute_outputs(x_values)
        for y in y_values:
            pieces.append(str(y) + ",")
    return "".join(pieces)
# ------------------------------------
# --- script: describe the raw data, then build the one-hot encoded
# --- training/test matrices and display the first few rows of each.
print "\nNeural network using Python"
print "\nGoal is to predict Wind Data \n"
print "The 30-item raw data looks like: \n"
print "[0] Anuradhapura, 2006, Jan, 3.5"
print "[1] Anuradhapura, 2006, Feb, 3.5"
print "[2] Anuradhapura, 2006, Mar, 3.1"
print ". . ."
print "[29] Batticaloa, 2006, June, 6.0"
# Each row: 36 encoded inputs (presumably station + year + month one-hot
# fields -- TODO confirm against the raw data) followed by 1 target value.
train_data = ([[0 for j in range(37)]
for i in range(24)]) # 24 rows, 37 cols (comment fixed; was '7 cols')
train_data[0] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.13]
train_data[1] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.13]
train_data[2] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.11]
train_data[3] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.17]
train_data[4] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.3]
train_data[5] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.33]
train_data[6] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.36]
train_data[7] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.35]
train_data[8] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.32]
train_data[9] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.19]
train_data[10] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.09]
train_data[11] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.13]
train_data[12] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.21]
train_data[13] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.22]
train_data[14] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,1.1]
train_data[15] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.19]
train_data[16] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.29]
train_data[17] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.34]
train_data[18] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.49]
train_data[19] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.35]
train_data[20] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.39]
train_data[21] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.22]
train_data[22] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0.00,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.15]
train_data[23] = [ 0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,1.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.3]
# Test rows use the same 36-input + 1-target layout.
test_data = ([[0 for j in range(37)]
for i in range(12)]) # 12 rows, 37 cols (comment fixed; was '6 rows')
test_data[0] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,0,0,0,1,0.17]
test_data[1] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,0,0,1,0,0.13]
test_data[2] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,0,1,0,0,0.14]
test_data[3] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,0,1,0,0,0,0.38]
test_data[4] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,0,1,0,0,0,0,0.22]
test_data[5] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,0,1,0,0,0,0,0,0.32]
test_data[6] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,0,1,0,0,0,0,0,0,0.34]
test_data[7] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,0,1,0,0,0,0,0,0,0,0.29]
test_data[8] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,0,1,0,0,0,0,0,0,0,0,0.25]
test_data[9] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,0,1,0,0,0,0,0,0,0,0,0,0.12]
test_data[10] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,0,1,0,0,0,0,0,0,0,0,0,0,0.09]
test_data[11] = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.89,1,0,0,0,0,0,0,0,0,0,0,0,0.13]
print "\nFirst few lines of encoded training data are: \n"
show_data(train_data, 4)
print "\nThe encoded test data is: \n"
show_data(test_data, 5)
print "\nCreating a 36-input, 37-hidden, 1-output neural network"
print "Using tanh and softmax activations \n"
num_input = 36
num_hidden = 37
num_output = 1
nn = NeuralNetwork(num_input, num_hidden, num_output)
max_epochs = 70 # artificially small
learn_rate = 0.08 # artificially large
momentum = 0.01
print "Setting max_epochs = " + str(max_epochs)
print "Setting learn_rate = " + str(learn_rate)
print "Setting momentum = " + str(momentum)
print "\nBeginning training using back-propagation"
weights = nn.train(train_data, max_epochs, learn_rate, momentum)
print "Training complete \n"
print "Final neural network weights and bias values:"
show_vector(weights)
print "Model accuracy on training data =",
acc_train = nn.accuracy(train_data)
print "%.4f" % acc_train
print "Model accuracy on test data =",
acc_test = nn.accuracy(test_data)
print "%.4f" % acc_test
print "---------------------------------------"
print "TESTING"
output = nn.result(test_data)
print output
filename2= 'datafile.csv'
with open(filename2,'rb') as f:
clm=[row["a"] for row in DictReader(f)]
x=[]
for i in clm:
x.append(float(i))
with open(filename2,'rb') as f:
clm1=[row["b"] for row in DictReader(f)]
y=[]
for i in clm1:
y.append(float(i))
with open(filename2,'rb') as f:
clm2=[row["c"] for row in DictReader(f)]
z=[]
for i in clm2:
z.append(float(i))
#wireframe plot
fig = plt.figure()
ax1 = fig.add_subplot(111, projection='3d')
ax1.plot_wireframe(x,y,z)
plt.show()
#reader=csv.reader(f,delimiter=';')
#for row in reader:
#print row[0]
print "\nEnd back-prop demo \n"
Wednesday, December 30, 2015
Improve the knowledge about Javascript - Placement in HTML File
There is a flexibility given to include JavaScript code anywhere in an HTML document. However the most preferred ways to include JavaScript in an HTML file are as follows −
- Script in <head>...</head> section.
- Script in <body>...</body> section.
- Script in <body>...</body> and <head>...</head> sections.
- Script in an external file and then include in <head>...</head> section.
In the following section, we will see how we can place JavaScript in an HTML file in different ways.
JavaScript in <head>...</head> section
If you want to have a script run on some event, such as when a user clicks somewhere, then you will place that script in the head as follows −
<html> <head> <script type="text/javascript"> <!-- function sayHello() { alert("Hello World") } //--> </script> </head> <body> <input type="button" onclick="sayHello()" value="Say Hello" /> </body> </html>
This code will produce the following results −
JavaScript in <body>...</body> section
If you need a script to run as the page loads so that the script generates content in the page, then the script goes in the <body> portion of the document. In this case, you would not have any function defined using JavaScript. Take a look at the following code.
<html> <head> </head> <body> <script type="text/javascript"> <!-- document.write("Hello World") //--> </script> <p>This is web page body </p> </body> </html>
This code will produce the following results −
JavaScript in <body> and <head> Sections
You can put your JavaScript code in <head> and <body> section altogether as follows −
<html> <head> <script type="text/javascript"> <!-- function sayHello() { alert("Hello World") } //--> </script> </head> <body> <script type="text/javascript"> <!-- document.write("Hello World") //--> </script> <input type="button" onclick="sayHello()" value="Say Hello" /> </body> </html>
This code will produce the following result −
JavaScript in External File
As you begin to work more extensively with JavaScript, you will be likely to find that there are cases where you are reusing identical JavaScript code on multiple pages of a site.
You are not restricted to be maintaining identical code in multiple HTML files. The script tag provides a mechanism to allow you to store JavaScript in an external file and then include it into your HTML files.
Here is an example to show how you can include an external JavaScript file in your HTML code using script tag and its src attribute.
<html> <head> <script type="text/javascript" src="filename.js" ></script> </head> <body> ....... </body> </html>
To use JavaScript from an external file source, you need to write all your JavaScript source code in a simple text file with the extension ".js" and then include that file as shown above.
For example, you can keep the following content in filename.js file and then you can use sayHello function in your HTML file after including the filename.js file.
function sayHello() { alert("Hello World") }
Improve the knowledge about Javascript - Enabling JavaScript in Browsers
All the modern browsers come with built-in support for JavaScript. Frequently, you may need to enable or disable this support manually. This chapter explains the procedure of enabling and disabling JavaScript support in your browsers: Internet Explorer, Firefox, chrome, and Opera.
JavaScript in Internet Explorer
Here are simple steps to turn on or turn off JavaScript in your Internet Explorer −
- Follow Tools → Internet Options from the menu.
- Select Security tab from the dialog box.
- Click the Custom Level button.
- Scroll down till you find Scripting option.
- Select Enable radio button under Active scripting.
- Finally click OK and come out
To disable JavaScript support in your Internet Explorer, you need to select the Disable radio button under Active scripting.
JavaScript in Firefox
Here are the steps to turn on or turn off JavaScript in Firefox −
- Open a new tab → type about: config in the address bar.
- Then you will find the warning dialog. Select I’ll be careful, I promise!
- Then you will find the list of configure options in the browser.
- In the search bar, type javascript.enabled.
- There you will find the option to enable or disable javascript by right-clicking on the value of that option → select toggle.
If javascript.enabled is true, it changes to false upon clicking toggle; if JavaScript is disabled, it becomes enabled upon clicking toggle.
JavaScript in Chrome
Here are the steps to turn on or turn off JavaScript in Chrome −
- Click the Chrome menu at the top right hand corner of your browser.
- Select Settings.
- Click Show advanced settings at the end of the page.
- Under the Privacy section, click the Content settings button.
- In the "Javascript" section, select "Do not allow any site to run JavaScript" or "Allow all sites to run JavaScript (recommended)".
JavaScript in Opera
Here are the steps to turn on or turn off JavaScript in Opera −
- Follow Tools → Preferences from the menu.
- Select Advanced option from the dialog box.
- Select Content from the listed items.
- Select Enable JavaScript checkbox.
- Finally click OK and come out.
To disable JavaScript support in your Opera, you should not select the Enable JavaScript checkbox.
Warning for Non-JavaScript Browsers
If you have to do something important using JavaScript, then you can display a warning message to the user using <noscript> tags.
You can add a noscript block immediately after the script block as follows −
<html> <body> <script language="javascript" type="text/javascript"> <!-- document.write("Hello World!") //--> </script> <noscript> Sorry...JavaScript is needed to go ahead. </noscript> </body> </html>
Now, if the user's browser does not support JavaScript or JavaScript is not enabled, then the message inside the <noscript> element will be displayed on the screen.
Improve the knowledge about Javascript - Syntax
JavaScript can be implemented using JavaScript statements that are placed within the <script>... </script> HTML tags in a web page.
You can place the <script> tags, containing your JavaScript, anywhere within your web page, but it is normally recommended that you keep them within the <head> tags.
The <script> tag alerts the browser program to start interpreting all the text between these tags as a script. A simple syntax of your JavaScript will appear as follows.
<script ...> JavaScript code </script>
The script tag takes two important attributes −
- Language − This attribute specifies what scripting language you are using. Typically, its value will be javascript. Although recent versions of HTML (and XHTML, its successor) have phased out the use of this attribute.
- Type − This attribute is what is now recommended to indicate the scripting language in use and its value should be set to "text/javascript".
So your JavaScript segment will look like −
<script language="javascript" type="text/javascript"> JavaScript code </script>
Your First JavaScript Script
Let us take a sample example to print out "Hello World". We added an optional HTML comment that surrounds our JavaScript code. This is to save our code from a browser that does not support JavaScript. The comment ends with a "//-->". Here "//" signifies a comment in JavaScript, so we add that to prevent a browser from reading the end of the HTML comment as a piece of JavaScript code. Next, we call a function document.write which writes a string into our HTML document.
This function can be used to write text, HTML, or both. Take a look at the following code.
<html> <body> <script language="javascript" type="text/javascript"> <!-- document.write("Hello World!") //--> </script> </body> </html>
This code will produce the following result −
Hello World!
Whitespace and Line Breaks
JavaScript ignores spaces, tabs, and newlines that appear in JavaScript programs. You can use spaces, tabs, and newlines freely in your program and you are free to format and indent your programs in a neat and consistent way that makes the code easy to read and understand.
Semicolons are Optional
Simple statements in JavaScript are generally followed by a semicolon character, just as they are in C, C++, and Java. JavaScript, however, allows you to omit this semicolon if each of your statements are placed on a separate line. For example, the following code could be written without semicolons.
<script language="javascript" type="text/javascript"> <!-- var1 = 10 var2 = 20 //--> </script>
But when formatted in a single line as follows, you must use semicolons −
<script language="javascript" type="text/javascript"> <!-- var1 = 10; var2 = 20; //--> </script>
Note − It is a good programming practice to use semicolons.
Case Sensitivity
JavaScript is a case-sensitive language. This means that the language keywords, variables, function names, and any other identifiers must always be typed with a consistent capitalization of letters.
So the identifiers Time and TIME will convey different meanings in JavaScript.
NOTE − Care should be taken while writing variable and function names in JavaScript.
Comments in JavaScript
JavaScript supports both C-style and C++-style comments. Thus −
- Any text between a // and the end of a line is treated as a comment and is ignored by JavaScript.
- Any text between the characters /* and */ is treated as a comment. This may span multiple lines.
- JavaScript also recognizes the HTML comment opening sequence <!--. JavaScript treats this as a single-line comment, just as it does the // comment.
- The HTML comment closing sequence --> is not recognized by JavaScript so it should be written as //-->.
Example
The following example shows how to use comments in JavaScript.
<script language="javascript" type="text/javascript"> <!-- // This is a comment. It is similar to comments in C++ /* * This is a multiline comment in JavaScript * It is very similar to comments in C Programming */ //--> </script>
Subscribe to:
Comments (Atom)