Friday 24 May 2019

MLP NN

import pandas as pd

# Location of dataset
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"

# Assign colum names to the dataset
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']

# Read dataset to pandas dataframe
irisdata = pd.read_csv(url, names=names) 
In [2]:
irisdata.head()  
Out[2]:
sepal-lengthsepal-widthpetal-lengthpetal-widthClass
05.13.51.40.2Iris-setosa
14.93.01.40.2Iris-setosa
24.73.21.30.2Iris-setosa
34.63.11.50.2Iris-setosa
45.03.61.40.2Iris-setosa
In [3]:
# Assign data from first four columns to X variable
X = irisdata.iloc[:, 0:4]

# Assign data from first fifth columns to y variable
y = irisdata.select_dtypes(include=[object])  
In [4]:
y.head() 
Out[4]:
Class
0Iris-setosa
1Iris-setosa
2Iris-setosa
3Iris-setosa
4Iris-setosa
In [5]:
y.Class.unique()  
Out[5]:
array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'], dtype=object)
In [6]:
from sklearn import preprocessing  
le = preprocessing.LabelEncoder()

y = y.apply(le.fit_transform)  
In [7]:
y
Out[7]:
Class
00
10
20
30
40
50
60
70
80
90
100
110
120
130
140
150
160
170
180
190
200
210
220
230
240
250
260
270
280
290
......
1202
1212
1222
1232
1242
1252
1262
1272
1282
1292
1302
1312
1322
1332
1342
1352
1362
1372
1382
1392
1402
1412
1422
1432
1442
1452
1462
1472
1482
1492
150 rows × 1 columns
In [8]:
from sklearn.model_selection import train_test_split  
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)  
In [9]:
from sklearn.preprocessing import StandardScaler  
scaler = StandardScaler()  
scaler.fit(X_train)

X_train = scaler.transform(X_train)  
X_test = scaler.transform(X_test)  
In [10]:
from sklearn.neural_network import MLPClassifier  
mlp = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000)  
mlp.fit(X_train, y_train.values.ravel())  
Out[10]:
MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
       beta_2=0.999, early_stopping=False, epsilon=1e-08,
       hidden_layer_sizes=(10, 10, 10), learning_rate='constant',
       learning_rate_init=0.001, max_iter=1000, momentum=0.9,
       nesterovs_momentum=True, power_t=0.5, random_state=None,
       shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,
       verbose=False, warm_start=False)
In [11]:
predictions = mlp.predict(X_test)  
In [12]:
predictions
Out[12]:
array([0, 2, 1, 2, 0, 0, 2, 2, 2, 0, 1, 1, 0, 1, 1, 2, 0, 0, 2, 1, 1, 1,
       1, 0, 1, 1, 2, 2, 1, 0])
In [13]:
#Evaluating the Algorithm
from sklearn.metrics import classification_report, confusion_matrix  
print(confusion_matrix(y_test,predictions))  
print(classification_report(y_test,predictions))
[[ 9  0  0]
 [ 0 12  1]
 [ 0  0  8]]
             precision    recall  f1-score   support

          0       1.00      1.00      1.00         9
          1       1.00      0.92      0.96        13
          2       0.89      1.00      0.94         8

avg / total       0.97      0.97      0.97        30

No comments:

Post a Comment