|
41 | 41 | print(__doc__) |
42 | 42 |
|
43 | 43 | import numpy as np |
44 | | -import pylab as pl |
| 44 | +import matplotlib.pyplot as plt |
45 | 45 |
|
46 | 46 | from sklearn import clone |
47 | 47 | from sklearn.datasets import load_iris |
|
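Note: the training loop elided between these hunks presumably pairs `clone` with the fixed `RANDOM_SEED` so each classifier is refit from an identical, unfitted copy. A minimal sketch of that pattern (the exact elided code may differ):

```python
import numpy as np
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

RANDOM_SEED = 13
np.random.seed(RANDOM_SEED)  # reseed so every model sees the same randomness

iris = load_iris()
X, y = iris.data[:, [0, 1]], iris.target  # assumption: any two-feature subset

model = clone(DecisionTreeClassifier())  # unfitted copy, same hyperparameters
model.fit(X, y)
```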
54 | 54 | n_classes = 3 |
55 | 55 | n_estimators = 30 |
56 | 56 | plot_colors = "ryb" |
57 | | -cmap = pl.cm.RdYlBu |
| 57 | +cmap = plt.cm.RdYlBu |
58 | 58 | plot_step = 0.02 # fine step width for decision surface contours |
59 | 59 | plot_step_coarser = 0.5 # step widths for coarse classifier guesses |
60 | 60 | RANDOM_SEED = 13 # fix the seed on each iteration |
|
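The two step widths are worth a quick illustration: `plot_step` drives the fine mesh that produces smooth filled contours, while `plot_step_coarser` yields only a handful of scatter points per axis. A sketch over an arbitrary unit range (the real bounds come from the feature data):

```python
import numpy as np

plot_step, plot_step_coarser = 0.02, 0.5
x_min, x_max = 0.0, 1.0  # illustrative only

fine = np.arange(x_min, x_max, plot_step)            # 50 samples: smooth contours
coarse = np.arange(x_min, x_max, plot_step_coarser)  # 2 samples: sparse markers
print(fine.size, coarse.size)  # 50 2
```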
101 | 101 | model_details += " with {} estimators".format(len(model.estimators_)) |
102 | 102 | print(model_details + " with features", pair, "has a score of", scores) |
103 | 103 |
|
104 | | - pl.subplot(3, 4, plot_idx) |
| 104 | + plt.subplot(3, 4, plot_idx) |
105 | 105 | if plot_idx <= len(models): |
106 | 106 | # Add a title at the top of each column |
107 | | - pl.title(model_title) |
| 107 | + plt.title(model_title) |
108 | 108 |
|
109 | 109 | # Now plot the decision boundary using a fine mesh as input to a |
110 | 110 | # filled contour plot |
|
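The mesh itself is built in the lines elided here (111–117). A hedged sketch of the standard scikit-learn pattern, assuming `X` and `plot_step` from the surrounding code; the exact elided code may differ:

```python
# Pad the feature range by one unit and sample it at plot_step in each axis.
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
```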
118 | 118 | if isinstance(model, DecisionTreeClassifier): |
119 | 119 | Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) |
120 | 120 | Z = Z.reshape(xx.shape) |
121 | | - cs = pl.contourf(xx, yy, Z, cmap=cmap) |
| 121 | + cs = plt.contourf(xx, yy, Z, cmap=cmap) |
122 | 122 | else: |
123 | 123 | # Choose the alpha blend level with respect to the number of estimators |
124 | 124 | # that are in use (noting that AdaBoost can use fewer estimators |
|
127 | 127 | for tree in model.estimators_: |
128 | 128 | Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) |
129 | 129 | Z = Z.reshape(xx.shape) |
130 | | - cs = pl.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) |
| 130 | + cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) |
131 | 131 |
|
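The blend level is set in an elided line; presumably it scales inversely with the number of fitted estimators, so stacking N translucent surfaces approximates an average: regions where most trees agree render near-opaque, while disputed regions stay washed out. A hedged sketch of that idea:

```python
# Assumption: the elided line computes something like this. Using the fitted
# count (len(model.estimators_)) rather than n_estimators matters because
# AdaBoost can stop early and fit fewer estimators than requested.
estimator_alpha = 1.0 / len(model.estimators_)
```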
132 | 132 | # Build a coarser grid to plot a set of ensemble classifications |
133 | 133 | # to show how these differ from what we see in the decision |
134 | 134 | # surfaces. These points are regularly spaced and do not have a black outline |
135 | 135 | xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser), |
136 | 136 | np.arange(y_min, y_max, plot_step_coarser)) |
137 | 137 | Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape) |
138 | | - cs_points = pl.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none") |
| 138 | + cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none") |
139 | 139 |
|
140 | 140 | # Plot the training points; these are clustered together and have a |
141 | 141 | # black outline |
142 | 142 | for i, c in zip(range(n_classes), plot_colors): |
143 | 143 | idx = np.where(y == i) |
144 | | - pl.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i], |
| 144 | + plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i], |
145 | 145 | cmap=cmap) |
146 | 146 |
|
147 | 147 | plot_idx += 1 # move on to the next plot in sequence |
148 | 148 |
|
149 | | -pl.suptitle("Classifiers on feature subsets of the Iris dataset") |
150 | | -pl.axis("tight") |
| 149 | +plt.suptitle("Classifiers on feature subsets of the Iris dataset") |
| 150 | +plt.axis("tight") |
151 | 151 |
|
152 | | -pl.show() |
| 152 | +plt.show() |
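The substance of this commit is the module swap: `pylab` bulk-imports NumPy and pyplot into one flat namespace and has long been discouraged by the matplotlib project, while `matplotlib.pyplot` keeps the plotting state machine behind an explicit prefix. Side by side:

```python
# Discouraged: pylab merges numpy and matplotlib.pyplot into one namespace.
# import pylab as pl
# pl.plot(pl.arange(3)); pl.show()

# Idiomatic: explicit imports, explicit namespaces.
import numpy as np
import matplotlib.pyplot as plt

plt.plot(np.arange(3))
plt.show()
```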