
Commit 9c60690

Pushing the docs to dev/ for branch: master, commit 39e95c798267810146919b13c41743695642f0c1
1 parent afd18bb commit 9c60690

952 files changed: +2932 / -2932 lines changed


dev/_downloads/plot_forest_iris.ipynb

+1 / -1
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
-"print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,\n AdaBoostClassifier)\nfrom sklearn.tree import DecisionTreeClassifier\n\n# Parameters\nn_classes = 3\nn_estimators = 30\ncmap = plt.cm.RdYlBu\nplot_step = 0.02 # fine step width for decision surface contours\nplot_step_coarser = 0.5 # step widths for coarse classifier guesses\nRANDOM_SEED = 13 # fix the seed on each iteration\n\n# Load data\niris = load_iris()\n\nplot_idx = 1\n\nmodels = [DecisionTreeClassifier(max_depth=None),\n RandomForestClassifier(n_estimators=n_estimators),\n ExtraTreesClassifier(n_estimators=n_estimators),\n AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),\n n_estimators=n_estimators)]\n\nfor pair in ([0, 1], [0, 2], [2, 3]):\n for model in models:\n # We only take the two corresponding features\n X = iris.data[:, pair]\n y = iris.target\n\n # Shuffle\n idx = np.arange(X.shape[0])\n np.random.seed(RANDOM_SEED)\n np.random.shuffle(idx)\n X = X[idx]\n y = y[idx]\n\n # Standardize\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n X = (X - mean) / std\n\n # Train\n model.fit(X, y)\n\n scores = model.score(X, y)\n # Create a title for each column and the console by using str() and\n # slicing away useless parts of the string\n model_title = str(type(model)).split(\n \".\")[-1][:-2][:-len(\"Classifier\")]\n\n model_details = model_title\n if hasattr(model, \"estimators_\"):\n model_details += \" with {} estimators\".format(\n len(model.estimators_))\n print(model_details + \" with features\", pair,\n \"has a score of\", scores)\n\n plt.subplot(3, 4, plot_idx)\n if plot_idx <= len(models):\n # Add a title at the top of each column\n plt.title(model_title)\n\n # Now plot the decision boundary using a fine mesh as input to a\n # filled contour plot\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),\n np.arange(y_min, y_max, plot_step))\n\n # Plot either a single DecisionTreeClassifier or alpha blend the\n # decision surfaces of the ensemble of classifiers\n if isinstance(model, DecisionTreeClassifier):\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=cmap)\n else:\n # Choose alpha blend level with respect to the number\n # of estimators\n # that are in use (noting that AdaBoost can use fewer estimators\n # than its maximum if it achieves a good enough fit early on)\n estimator_alpha = 1.0 / len(model.estimators_)\n for tree in model.estimators_:\n Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)\n\n # Build a coarser grid to plot a set of ensemble classifications\n # to show how these are different to what we see in the decision\n # surfaces. These points are regularly space and do not have a\n # black outline\n xx_coarser, yy_coarser = np.meshgrid(\n np.arange(x_min, x_max, plot_step_coarser),\n np.arange(y_min, y_max, plot_step_coarser))\n Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(),\n yy_coarser.ravel()]\n ).reshape(xx_coarser.shape)\n cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,\n c=Z_points_coarser, cmap=cmap,\n edgecolors=\"none\")\n\n # Plot the training points, these are clustered together and have a\n # black outline\n plt.scatter(X[:, 0], X[:, 1], c=y,\n cmap=ListedColormap(['r', 'y', 'b']),\n edgecolor='k', s=20)\n plot_idx += 1 # move on to the next plot in sequence\n\nplt.suptitle(\"Classifiers on feature subsets of the Iris dataset\")\nplt.axis(\"tight\")\n\nplt.show()"
+"print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,\n AdaBoostClassifier)\nfrom sklearn.tree import DecisionTreeClassifier\n\n# Parameters\nn_classes = 3\nn_estimators = 30\ncmap = plt.cm.RdYlBu\nplot_step = 0.02 # fine step width for decision surface contours\nplot_step_coarser = 0.5 # step widths for coarse classifier guesses\nRANDOM_SEED = 13 # fix the seed on each iteration\n\n# Load data\niris = load_iris()\n\nplot_idx = 1\n\nmodels = [DecisionTreeClassifier(max_depth=None),\n RandomForestClassifier(n_estimators=n_estimators),\n ExtraTreesClassifier(n_estimators=n_estimators),\n AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),\n n_estimators=n_estimators)]\n\nfor pair in ([0, 1], [0, 2], [2, 3]):\n for model in models:\n # We only take the two corresponding features\n X = iris.data[:, pair]\n y = iris.target\n\n # Shuffle\n idx = np.arange(X.shape[0])\n np.random.seed(RANDOM_SEED)\n np.random.shuffle(idx)\n X = X[idx]\n y = y[idx]\n\n # Standardize\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n X = (X - mean) / std\n\n # Train\n model.fit(X, y)\n\n scores = model.score(X, y)\n # Create a title for each column and the console by using str() and\n # slicing away useless parts of the string\n model_title = str(type(model)).split(\n \".\")[-1][:-2][:-len(\"Classifier\")]\n\n model_details = model_title\n if hasattr(model, \"estimators_\"):\n model_details += \" with {} estimators\".format(\n len(model.estimators_))\n print(model_details + \" with features\", pair,\n \"has a score of\", scores)\n\n plt.subplot(3, 4, plot_idx)\n if plot_idx <= len(models):\n # Add a title at the top of each column\n plt.title(model_title, fontsize=9)\n\n # Now plot the decision boundary using a fine mesh as input to a\n # filled contour plot\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),\n np.arange(y_min, y_max, plot_step))\n\n # Plot either a single DecisionTreeClassifier or alpha blend the\n # decision surfaces of the ensemble of classifiers\n if isinstance(model, DecisionTreeClassifier):\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=cmap)\n else:\n # Choose alpha blend level with respect to the number\n # of estimators\n # that are in use (noting that AdaBoost can use fewer estimators\n # than its maximum if it achieves a good enough fit early on)\n estimator_alpha = 1.0 / len(model.estimators_)\n for tree in model.estimators_:\n Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)\n\n # Build a coarser grid to plot a set of ensemble classifications\n # to show how these are different to what we see in the decision\n # surfaces. These points are regularly space and do not have a\n # black outline\n xx_coarser, yy_coarser = np.meshgrid(\n np.arange(x_min, x_max, plot_step_coarser),\n np.arange(y_min, y_max, plot_step_coarser))\n Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(),\n yy_coarser.ravel()]\n ).reshape(xx_coarser.shape)\n cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,\n c=Z_points_coarser, cmap=cmap,\n edgecolors=\"none\")\n\n # Plot the training points, these are clustered together and have a\n # black outline\n plt.scatter(X[:, 0], X[:, 1], c=y,\n cmap=ListedColormap(['r', 'y', 'b']),\n edgecolor='k', s=20)\n plot_idx += 1 # move on to the next plot in sequence\n\nplt.suptitle(\"Classifiers on feature subsets of the Iris dataset\", fontsize=12)\nplt.axis(\"tight\")\nplt.tight_layout(h_pad=0.2, w_pad=0.2, pad=2.5)\nplt.show()"
 ]
 }
 ],
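The whole code cell is stored as a single JSON string, which is why this change appears as one long removed/added line pair. To read just the edited source, a minimal sketch along the following lines works; it assumes the notebook has been downloaded locally as plot_forest_iris.ipynb and that the nbformat package is installed (neither is part of this commit):

# Hypothetical helper, not part of the commit: print the source of the cell
# that carries the fontsize/tight_layout edits shown in the diff above.
import nbformat

nb = nbformat.read("plot_forest_iris.ipynb", as_version=4)
for cell in nb.cells:
    if cell.cell_type == "code" and "plt.tight_layout" in cell.source:
        print(cell.source)  # the edited cell, with real newlines instead of \n escapes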

dev/_downloads/plot_forest_iris.py

+3 / -3
@@ -107,7 +107,7 @@
 plt.subplot(3, 4, plot_idx)
 if plot_idx <= len(models):
 # Add a title at the top of each column
-plt.title(model_title)
+plt.title(model_title, fontsize=9)

 # Now plot the decision boundary using a fine mesh as input to a
 # filled contour plot
@@ -154,7 +154,7 @@
 edgecolor='k', s=20)
 plot_idx += 1 # move on to the next plot in sequence

-plt.suptitle("Classifiers on feature subsets of the Iris dataset")
+plt.suptitle("Classifiers on feature subsets of the Iris dataset", fontsize=12)
 plt.axis("tight")
-
+plt.tight_layout(h_pad=0.2, w_pad=0.2, pad=2.5)
 plt.show()
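The three edits are easier to read here: per-axes titles shrink to fontsize=9, the figure-level suptitle is set to fontsize=12, and tight_layout reserves padding so the 3 x 4 grid of panels does not overlap. A small standalone sketch of that layout, using synthetic data and hypothetical panel titles rather than the actual Iris decision surfaces, would look roughly like this:

# Layout-only sketch (assumptions: synthetic scatter data, made-up panel titles);
# it only demonstrates the spacing parameters added by this commit.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(13)
for plot_idx in range(1, 13):                 # same 3 x 4 grid as the example
    plt.subplot(3, 4, plot_idx)
    plt.scatter(rng.randn(30), rng.randn(30), s=15)
    plt.title("panel {}".format(plot_idx), fontsize=9)   # per-axes title, as in the diff

plt.suptitle("Classifiers on feature subsets of the Iris dataset", fontsize=12)
# pad is the border around the whole figure; h_pad/w_pad are the vertical/horizontal
# gaps between axes, all measured in multiples of the font size.
plt.tight_layout(h_pad=0.2, w_pad=0.2, pad=2.5)
plt.show()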

dev/_downloads/scikit-learn-docs.pdf

-33.7 KB (binary file not shown)

dev/_sources/auto_examples/applications/plot_face_recognition.rst.txt

+20 / -20

dev/_sources/auto_examples/applications/plot_model_complexity_influence.rst.txt

+14 / -14
