|
| 1 | +# Random Forest Classifier |
| 2 | + |
| 3 | +# Importing the libraries |
| 4 | + |
| 5 | +import numpy as np |
| 6 | +import matplotlib.pyplot as plt |
| 7 | +import pandas as pd |
| 8 | + |
# Importing the dataset

# Columns 2 and 3 are the two numeric features (labelled "Age" and
# "Estimated Salary" in the plots below); column 4 is the binary target.
datasets = pd.read_csv('Social_Network_Ads.csv')
X = datasets.iloc[:, 2:4].values
Y = datasets.iloc[:, 4].values
| 14 | + |
# Splitting the dataset into the Training set and Test set

from sklearn.model_selection import train_test_split

# Hold out 25% of the samples for evaluation; the fixed seed makes the
# split reproducible across runs.
X_Train, X_Test, Y_Train, Y_Test = train_test_split(
    X, Y, test_size=0.25, random_state=0
)
| 19 | + |
# Feature Scaling

from sklearn.preprocessing import StandardScaler

# Standardize both features to zero mean / unit variance. The scaler is
# fitted on the training data only; the same fitted transform is then
# applied to the test set to avoid information leakage.
sc_X = StandardScaler()
X_Train = sc_X.fit_transform(X_Train)
X_Test = sc_X.transform(X_Test)
| 26 | + |
# Fitting the classifier to the Training set

from sklearn.ensemble import RandomForestClassifier

# 10 trees with the entropy (information-gain) split criterion; fixed seed
# so the forest is reproducible.
classifier = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
# BUG FIX: the original script never trained the model, so the predict()
# call below would raise sklearn's NotFittedError.
classifier.fit(X_Train, Y_Train)

# Predicting the test set results

Y_Pred = classifier.predict(X_Test)
| 35 | + |
# Making the Confusion Matrix

from sklearn.metrics import confusion_matrix

# cm[i, j] counts samples whose true class is i and predicted class is j.
cm = confusion_matrix(Y_Test, Y_Pred)
| 40 | + |
# Visualising the results

from matplotlib.colors import ListedColormap


def _plot_decision_boundary(X_set, y_set, title):
    """Show the classifier's decision regions with the data points overlaid.

    X_set : 2-D array of the two scaled features (Age, Estimated Salary).
    y_set : 1-D array of class labels for the rows of X_set.
    title : plot title string.

    Uses the module-level ``classifier`` fitted above. The original script
    duplicated this whole body (and its ListedColormap import) for the
    training and test sets; it is factored out here and called twice.
    """
    cmap = ListedColormap(('red', 'green'))
    # Dense grid over the feature plane (padded by 1 on each side) on which
    # the classifier is evaluated to colour the decision regions.
    X1, X2 = np.meshgrid(
        np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
        np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01),
    )
    plt.contourf(
        X1, X2,
        classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
        alpha=0.75, cmap=cmap,
    )
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    # Overlay the actual observations, coloured by their true class.
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c=cmap(i), label=j)
    plt.title(title)
    plt.xlabel('Age')
    plt.ylabel('Estimated Salary')
    plt.legend()
    plt.show()


# Visualising the Training set results
_plot_decision_boundary(X_Train, Y_Train, 'Random Forest Classifier (Training set)')

# Visualising the Test set results
_plot_decision_boundary(X_Test, Y_Test, 'Random Forest Classifier (Test set)')
# (removed stray web-page scrape artifact: "0 commit comments")