Skip to content

Commit e8ff4f3

Browse files
Committed with message: Pushing the docs to dev/ for branch: master, commit 9328581ec5213e743fca116db06e7beef9a9da00
1 parent 47faaca commit e8ff4f3

File tree

1,080 files changed

+3311
-3417
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

1,080 files changed

+3311
-3417
lines changed
40 Bytes
Binary file not shown.
40 Bytes
Binary file not shown.

dev/_downloads/plot_faces_decomposition.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
},
2727
"outputs": [],
2828
"source": [
29-
"print(__doc__)\n\n# Authors: Vlad Niculae, Alexandre Gramfort\n# License: BSD 3 clause\n\nimport logging\nfrom time import time\n\nfrom numpy.random import RandomState\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn import decomposition\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\nn_row, n_col = 2, 3\nn_components = n_row * n_col\nimage_shape = (64, 64)\nrng = RandomState(0)\n\n# #############################################################################\n# Load faces data\ndataset = fetch_olivetti_faces(shuffle=True, random_state=rng)\nfaces = dataset.data\n\nn_samples, n_features = faces.shape\n\n# global centering\nfaces_centered = faces - faces.mean(axis=0)\n\n# local centering\nfaces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)\n\nprint(\"Dataset consists of %d faces\" % n_samples)\n\n\ndef plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):\n plt.figure(figsize=(2. 
* n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(image_shape), cmap=cmap,\n interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)\n\n# #############################################################################\n# List of the different estimators, whether to center and transpose the\n# problem, and whether the transformer uses the clustering API.\nestimators = [\n ('Eigenfaces - PCA using randomized SVD',\n decomposition.PCA(n_components=n_components, svd_solver='randomized',\n whiten=True),\n True),\n\n ('Non-negative components - NMF',\n decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),\n False),\n\n ('Independent components - FastICA',\n decomposition.FastICA(n_components=n_components, whiten=True),\n True),\n\n ('Sparse comp. - MiniBatchSparsePCA',\n decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,\n n_iter=100, batch_size=3,\n random_state=rng,\n normalize_components=True),\n True),\n\n ('MiniBatchDictionaryLearning',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng),\n True),\n\n ('Cluster centers - MiniBatchKMeans',\n MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,\n max_iter=50, random_state=rng),\n True),\n\n ('Factor Analysis components - FA',\n decomposition.FactorAnalysis(n_components=n_components, max_iter=20),\n True),\n]\n\n\n# #############################################################################\n# Plot a sample of the input data\n\nplot_gallery(\"First centered Olivetti faces\", faces_centered[:n_components])\n\n# #############################################################################\n# Do the estimation and plot it\n\nfor name, estimator, center in estimators:\n 
print(\"Extracting the top %d %s...\" % (n_components, name))\n t0 = time()\n data = faces\n if center:\n data = faces_centered\n estimator.fit(data)\n train_time = (time() - t0)\n print(\"done in %0.3fs\" % train_time)\n if hasattr(estimator, 'cluster_centers_'):\n components_ = estimator.cluster_centers_\n else:\n components_ = estimator.components_\n\n # Plot an image representing the pixelwise variance provided by the\n # estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,\n # via the PCA decomposition, also provides a scalar noise_variance_\n # (the mean of pixelwise variance) that cannot be displayed as an image\n # so we skip it.\n if (hasattr(estimator, 'noise_variance_') and\n estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case\n plot_gallery(\"Pixelwise variance\",\n estimator.noise_variance_.reshape(1, -1), n_col=1,\n n_row=1)\n plot_gallery('%s - Train time %.1fs' % (name, train_time),\n components_[:n_components])\n\nplt.show()\n\n# #############################################################################\n# Various positivity constraints applied to dictionary learning.\nestimators = [\n ('Dictionary learning',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng),\n True),\n ('Dictionary learning - positive dictionary',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng,\n positive_dict=True),\n True),\n ('Dictionary learning - positive code',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng,\n positive_code=True),\n True),\n ('Dictionary learning - positive dictionary & code',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng,\n positive_dict=True,\n positive_code=True),\n True),\n]\n\n\n# 
#############################################################################\n# Plot a sample of the input data\n\nplot_gallery(\"First centered Olivetti faces\", faces_centered[:n_components],\n cmap=plt.cm.RdBu)\n\n# #############################################################################\n# Do the estimation and plot it\n\nfor name, estimator, center in estimators:\n print(\"Extracting the top %d %s...\" % (n_components, name))\n t0 = time()\n data = faces\n if center:\n data = faces_centered\n estimator.fit(data)\n train_time = (time() - t0)\n print(\"done in %0.3fs\" % train_time)\n components_ = estimator.components_\n plot_gallery(name, components_[:n_components], cmap=plt.cm.RdBu)\n\nplt.show()"
29+
"print(__doc__)\n\n# Authors: Vlad Niculae, Alexandre Gramfort\n# License: BSD 3 clause\n\nimport logging\nfrom time import time\n\nfrom numpy.random import RandomState\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn import decomposition\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\nn_row, n_col = 2, 3\nn_components = n_row * n_col\nimage_shape = (64, 64)\nrng = RandomState(0)\n\n# #############################################################################\n# Load faces data\ndataset = fetch_olivetti_faces(shuffle=True, random_state=rng)\nfaces = dataset.data\nn_samples, n_features = faces.shape\n\n# global centering\nfaces_centered = faces - faces.mean(axis=0)\n\n# local centering\nfaces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)\n\nprint(\"Dataset consists of %d faces\" % n_samples)\n\n\ndef plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):\n plt.figure(figsize=(2. 
* n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(image_shape), cmap=cmap,\n interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)\n\n# #############################################################################\n# List of the different estimators, whether to center and transpose the\n# problem, and whether the transformer uses the clustering API.\nestimators = [\n ('Eigenfaces - PCA using randomized SVD',\n decomposition.PCA(n_components=n_components, svd_solver='randomized',\n whiten=True),\n True),\n\n ('Non-negative components - NMF',\n decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),\n False),\n\n ('Independent components - FastICA',\n decomposition.FastICA(n_components=n_components, whiten=True),\n True),\n\n ('Sparse comp. - MiniBatchSparsePCA',\n decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,\n n_iter=100, batch_size=3,\n random_state=rng),\n True),\n\n ('MiniBatchDictionaryLearning',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng),\n True),\n\n ('Cluster centers - MiniBatchKMeans',\n MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,\n max_iter=50, random_state=rng),\n True),\n\n ('Factor Analysis components - FA',\n decomposition.FactorAnalysis(n_components=n_components, max_iter=20),\n True),\n]\n\n\n# #############################################################################\n# Plot a sample of the input data\n\nplot_gallery(\"First centered Olivetti faces\", faces_centered[:n_components])\n\n# #############################################################################\n# Do the estimation and plot it\n\nfor name, estimator, center in estimators:\n print(\"Extracting the top %d %s...\" % 
(n_components, name))\n t0 = time()\n data = faces\n if center:\n data = faces_centered\n estimator.fit(data)\n train_time = (time() - t0)\n print(\"done in %0.3fs\" % train_time)\n if hasattr(estimator, 'cluster_centers_'):\n components_ = estimator.cluster_centers_\n else:\n components_ = estimator.components_\n\n # Plot an image representing the pixelwise variance provided by the\n # estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,\n # via the PCA decomposition, also provides a scalar noise_variance_\n # (the mean of pixelwise variance) that cannot be displayed as an image\n # so we skip it.\n if (hasattr(estimator, 'noise_variance_') and\n estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case\n plot_gallery(\"Pixelwise variance\",\n estimator.noise_variance_.reshape(1, -1), n_col=1,\n n_row=1)\n plot_gallery('%s - Train time %.1fs' % (name, train_time),\n components_[:n_components])\n\nplt.show()\n\n# #############################################################################\n# Various positivity constraints applied to dictionary learning.\nestimators = [\n ('Dictionary learning',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng),\n True),\n ('Dictionary learning - positive dictionary',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng,\n positive_dict=True),\n True),\n ('Dictionary learning - positive code',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n fit_algorithm='cd',\n random_state=rng,\n positive_code=True),\n True),\n ('Dictionary learning - positive dictionary & code',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n fit_algorithm='cd',\n random_state=rng,\n positive_dict=True,\n positive_code=True),\n True),\n]\n\n\n# 
#############################################################################\n# Plot a sample of the input data\n\nplot_gallery(\"First centered Olivetti faces\", faces_centered[:n_components],\n cmap=plt.cm.RdBu)\n\n# #############################################################################\n# Do the estimation and plot it\n\nfor name, estimator, center in estimators:\n print(\"Extracting the top %d %s...\" % (n_components, name))\n t0 = time()\n data = faces\n if center:\n data = faces_centered\n estimator.fit(data)\n train_time = (time() - t0)\n print(\"done in %0.3fs\" % train_time)\n components_ = estimator.components_\n plot_gallery(name, components_[:n_components], cmap=plt.cm.RdBu)\n\nplt.show()"
3030
]
3131
}
3232
],

dev/_downloads/plot_faces_decomposition.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@
3636
# Load faces data
3737
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
3838
faces = dataset.data
39-
4039
n_samples, n_features = faces.shape
4140

4241
# global centering
@@ -81,8 +80,7 @@ def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):
8180
('Sparse comp. - MiniBatchSparsePCA',
8281
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
8382
n_iter=100, batch_size=3,
84-
random_state=rng,
85-
normalize_components=True),
83+
random_state=rng),
8684
True),
8785

8886
('MiniBatchDictionaryLearning',
@@ -156,12 +154,14 @@ def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):
156154
('Dictionary learning - positive code',
157155
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
158156
n_iter=50, batch_size=3,
157+
fit_algorithm='cd',
159158
random_state=rng,
160159
positive_code=True),
161160
True),
162161
('Dictionary learning - positive dictionary & code',
163162
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
164163
n_iter=50, batch_size=3,
164+
fit_algorithm='cd',
165165
random_state=rng,
166166
positive_dict=True,
167167
positive_code=True),

0 commit comments

Comments (0)