import numpy as np
feature_set = np.array([[0,1,0],[0,0,1],[1,0,0],[1,1,0],[1,1,1]])
labels = np.array([[1,0,0,1,1]])
labels = labels.reshape(5,1)
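#Quick sanity check: the reshape turns the (1, 5) row of labels into a (5, 1)
#column so it lines up with the network's per-sample outputs during training.
print(feature_set.shape, labels.shape) # (5, 3) (5, 1)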
# In[3]:
labels
# In[8]:
#define the hyperparameters for our neural network
np.random.seed(42) #seed the random number generator so the same random values are produced every time the script is executed
weights = np.random.rand(3,1)
bias = np.random.rand(1)
lr = 0.05
# In[9]:
weights #an array of shape (3, 1) populated with random samples from a uniform distribution over [0, 1)
# In[10]:
bias #Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1)
# In[11]:
lr # learning rate
# In[12]:
def sigmoid(x): #the sigmoid function serves as our activation function
    return 1/(1+np.exp(-x))
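#A quick sketch to build intuition (the test inputs here are arbitrary choices):
#sigmoid squashes any real input into (0, 1), with sigmoid(0) = 0.5, and large
#positive/negative inputs saturate toward 1 and 0.
print(sigmoid(0)) # 0.5
print(sigmoid(np.array([-10, 0, 10]))) # approximately [0. , 0.5, 1.]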
# In[13]:
def sigmoid_der(x): #calculates the derivative of the sigmoid function
    return sigmoid(x)*(1-sigmoid(x))
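#As a sanity-check sketch (the step size and test points below are our own
#assumptions), we can verify sigmoid_der against a central finite difference:
h = 1e-5
x_test = np.array([-2.0, 0.0, 2.0])
numeric = (sigmoid(x_test + h) - sigmoid(x_test - h)) / (2 * h)
print(np.allclose(numeric, sigmoid_der(x_test))) # True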
# In[14]:
#train our neural network so that it can predict whether a person is diabetic or not.
#An epoch is one pass of the training algorithm over the data.
#We will train on our data for 20,000 epochs. The ultimate goal is to minimize the error.
for epoch in range(20000):
    inputs = feature_set
    #feedforward step 1: take the dot product of the inputs and the weight vector, then add the bias
    XW = np.dot(feature_set, weights) + bias
    #feedforward step 2: pass the dot product through the sigmoid activation function
    z = sigmoid(XW)
    #The variable z contains the predicted outputs. The first step of backpropagation is to find the error.
    # backpropagation step 1
    error = z - labels
    print(error.sum())
    # backpropagation step 2
    dcost_dpred = error
    dpred_dz = sigmoid_der(z)
    #z_delta contains the product of dcost_dpred and dpred_dz.
    #Instead of looping through each record and multiplying the input by the corresponding z_delta,
    #we take the transpose of the input feature matrix and multiply it by z_delta.
    #Finally, we scale the gradient by the learning rate lr to control the size of each update step.
    z_delta = dcost_dpred * dpred_dz
    inputs = feature_set.T
    weights -= lr * np.dot(inputs, z_delta)
    for num in z_delta:
        bias -= lr * num
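#Note: because the updates are simply summed, the per-element bias loop above is
#equivalent to a single vectorized update. A minimal alternative sketch (do not
#run both, or the bias would be updated twice per epoch):
#    bias -= lr * z_delta.sum()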
# In[15]:
#You can see that the error is extremely small by the end of training.
#At this point our weights and bias hold values that can be used to predict whether a person is diabetic or not,
#based on their smoking habits, obesity, and exercise habits.
#TEST: suppose a patient comes in who smokes, is not obese, and doesn't exercise.
#Let's find out whether they are likely to be diabetic. The input feature vector will be [1,0,0].
single_point = np.array([1,0,0])
result = sigmoid(np.dot(single_point, weights) + bias)
print(result)
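#For convenience, the feedforward pass can be wrapped in a small helper; the
#name predict below is our own illustrative choice, not part of the original code:
def predict(point):
    #run a single feature vector through the trained weights and bias
    return sigmoid(np.dot(point, weights) + bias)
print(predict(single_point)) # same value as result above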
# In[ ]:
#You can see that the person is likely not diabetic since the value is much closer to 0 than 1.
# In[16]:
#let's test another person who doesn't smoke, is obese, and doesn't exercise. The input feature vector will be [0,1,0]
single_point = np.array([0,1,0])
result = sigmoid(np.dot(single_point, weights) + bias)
print(result)
# In[18]:
#The value is very close to 1, which is likely due to the person's obesity.
#Multiply each output by 100 to express the predicted probability as a percentage:
A = 0.00707584 * 100 #prediction for the first test point [1,0,0]
B = 0.99837029 * 100 #prediction for the second test point [0,1,0]
print(A)
print(B)
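#Alternatively (a small sketch), the percentage can be computed directly from
#the model output instead of hard-coding the numbers; result still holds the
#second test point's prediction here:
print(f"{result[0] * 100:.2f}%")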