We load the data and take a first look at the dataset we will be working with.
from sklearn import datasets
wine = datasets.load_wine()
print(wine.DESCR)
print('Features: ', wine.feature_names)
print('Labels: ', wine.target_names)
data = wine.data
target = wine.target
print(data)
print(target)
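Before splitting, a quick sanity check (a hypothetical addition, not in the original notebook) of the shape of the data and the class balance:

import numpy as np

print(data.shape)           # expected: (178, 13) -> 178 samples, 13 features
print(np.bincount(target))  # samples per class, expected: [59 71 48]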
We split the data into two parts: one for training and one for testing.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=.3, random_state=109)
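For reference only (an assumption on our part; the split above is the one used in the rest of the notebook): passing stratify=target preserves the class proportions in both subsets, which is useful on small datasets.

# Stratified variant, stored in separate (hypothetical) variables so the original split is untouched
Xs_train, Xs_test, ys_train, ys_test = train_test_split(
    data, target, test_size=.3, random_state=109, stratify=target)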
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn import metrics
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
cm = metrics.confusion_matrix(y_test, y_pred)
print(cm)
# Manual check: correct predictions (the diagonal of the confusion matrix) over all test samples
(20+15+14)/(20+15+14+1+2+2)
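The same ratio can be computed directly from the confusion matrix: correct predictions sit on its diagonal (a small sketch using the cm printed above):

import numpy as np
print(np.trace(cm) / cm.sum())  # accuracy = diagonal / total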
The dataset we will use here is described at: https://archive.ics.uci.edu/ml/datasets/banknote+authentication
Simply put, it is a banknote authentication task based on features extracted from images.
import pandas as pa
bankdata = pa.read_csv('bill_authentication.csv')
bankdata.head()
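A quick (hypothetical) look at the table size and label distribution, assuming 'Class' is the 0/1 authenticity label:

print(bankdata.shape)
print(bankdata['Class'].value_counts())  # genuine vs. forged banknotes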
X = bankdata.drop('Class', axis=1)
y = bankdata['Class']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=109)
from sklearn.svm import SVC
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
cm = metrics.confusion_matrix(y_test, y_pred)
print(cm)
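As an optional follow-up (not in the original notebook), we could also try a non-linear RBF kernel and compare:

# Illustrative variant: RBF kernel instead of the linear one
svclassifier_rbf = SVC(kernel='rbf')
svclassifier_rbf.fit(X_train, y_train)
y_pred_rbf = svclassifier_rbf.predict(X_test)
print('Accuracy (RBF): ', '{:2.2%}'.format(metrics.accuracy_score(y_test, y_pred_rbf)))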
What if we compared this with a Bayesian classifier:
clsb = GaussianNB()
clsb.fit(X_train, y_train)
y_pred = clsb.predict(X_test)
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
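To make the comparison with the SVM complete, we can also print the confusion matrix of the Bayesian classifier (a small addition, same metrics call as above):

cm_nb = metrics.confusion_matrix(y_test, y_pred)
print(cm_nb)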
col_names = ['pregnant', 'glucose', 'bp', 'skin', 'insulin', 'bmi', 'pedigree', 'age', 'label']
pima = pa.read_csv('pima-indians-diabetes.csv',header=None, names=col_names)
pima.head()
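Before modelling, a quick (hypothetical) check of the dataset size and label balance, assuming 'label' is the 0/1 diabetes outcome:

print(pima.shape)
print(pima['label'].value_counts())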
feature_cols = ['pregnant', 'insulin', 'bmi', 'age','glucose','bp','pedigree']
X = pima[feature_cols]
y = pima.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=109)
from sklearn import tree
clf_dt = tree.DecisionTreeClassifier()
clf_dt.fit(X_train, y_train)
y_pred = clf_dt.predict(X_test)
print(y_pred.shape)
print(y_test.shape)
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
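A fully grown decision tree tends to overfit the training data. As an illustrative variant (an addition, not part of the original notebook), limiting the depth often generalizes better:

clf_dt_small = tree.DecisionTreeClassifier(max_depth=4, random_state=109)
clf_dt_small.fit(X_train, y_train)
y_pred_small = clf_dt_small.predict(X_test)
print('Accuracy: ', '{:2.2%}'.format(metrics.accuracy_score(y_test, y_pred_small)))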
from sklearn.ensemble import RandomForestClassifier
clf_rf = RandomForestClassifier()
clf_rf.fit(X_train, y_train)
y_pred = clf_rf.predict(X_test)
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
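A random forest can also tell us which features it relies on most. As a small illustrative addition, we can list the feature importances of the fitted model:

# Sort the features of the fitted forest by decreasing importance
for name, importance in sorted(zip(feature_cols, clf_rf.feature_importances_),
                               key=lambda t: t[1], reverse=True):
    print('{:10s} {:.3f}'.format(name, importance))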
Well, that's better than the decision tree; let's try to do even better...
clf_rf = RandomForestClassifier()
clf_rf.fit(X_train, y_train)
print(clf_rf.get_params())  # list the default hyperparameters we can tune
We can see that there are indeed plenty of hyperparameters to play with. Let's experiment a little:
clf_rf = RandomForestClassifier(bootstrap=False, max_depth=5)
clf_rf.fit(X_train, y_train)
y_pred = clf_rf.predict(X_test)
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
That's better... We would now like to try many parameter combinations.
Problem: this can be very, very long and tedious.
Fortunately, we can automate the search, here with a randomized search (RandomizedSearchCV):
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['sqrt', 'log2']  # 'auto' (the old default, equivalent to 'sqrt' for classifiers) is removed in recent scikit-learn
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
print(random_grid)
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf = RandomForestClassifier()
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=100,
                               cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_random.fit(X_train, y_train)
rf_random.best_params_
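Optionally, once the randomized search has located a promising region, it can be refined with an exhaustive GridSearchCV around the best values found. A minimal sketch (the value ranges below are illustrative assumptions, not the actual search output); afterwards we simply retrain with the parameters reported by the randomized search:

from sklearn.model_selection import GridSearchCV

# Hypothetical refinement grid centred on plausible best values
param_grid = {
    'n_estimators': [400, 600, 800],
    'max_depth': [30, 40, 50],
    'min_samples_leaf': [2, 4, 6],
}
grid_search = GridSearchCV(RandomForestClassifier(random_state=42), param_grid, cv=3, n_jobs=-1)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)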
clf_rf = RandomForestClassifier(bootstrap=True, max_depth=40, max_features='sqrt',  # 'sqrt' replaces the deprecated 'auto'
                                min_samples_leaf=4, min_samples_split=2, n_estimators=600)
clf_rf.fit(X_train, y_train)
y_pred = clf_rf.predict(X_test)
scores = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: ','{:2.2%}'.format(scores))
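As a final optional check (not in the original notebook), the tuned forest can be evaluated with cross-validation, which is less dependent on a single train/test split:

from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(clf_rf, X, y, cv=5)
print('CV accuracy: ', '{:2.2%}'.format(cv_scores.mean()))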