Skip to content

Commit 2ed18e0

Browse files
adrinjalali authored and jnothman committed
[MRG] DOC Examples added to the rest of linear models (scikit-learn#11975)
1 parent f71de6f commit 2ed18e0

File tree

7 files changed

+139
-0
lines changed

7 files changed

+139
-0
lines changed

sklearn/linear_model/base.py

Lines changed: 17 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -399,6 +399,23 @@ class LinearRegression(LinearModel, RegressorMixin):
399399
intercept_ : array
400400
Independent term in the linear model.
401401
402+
Examples
403+
--------
404+
>>> import numpy as np
405+
>>> from sklearn.linear_model import LinearRegression
406+
>>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
407+
>>> # y = 1 * x_0 + 2 * x_1 + 3
408+
>>> y = np.dot(X, np.array([1, 2])) + 3
409+
>>> reg = LinearRegression().fit(X, y)
410+
>>> reg.score(X, y)
411+
1.0
412+
>>> reg.coef_
413+
array([1., 2.])
414+
>>> reg.intercept_ # doctest: +ELLIPSIS
415+
3.0000...
416+
>>> reg.predict(np.array([[3, 5]]))
417+
array([16.])
418+
402419
Notes
403420
-----
404421
From the implementation point of view, this is just plain Ordinary

sklearn/linear_model/coordinate_descent.py

Lines changed: 24 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -1368,6 +1368,17 @@ class LassoCV(LinearModelCV, RegressorMixin):
13681368
number of iterations run by the coordinate descent solver to reach
13691369
the specified tolerance for the optimal alpha.
13701370
1371+
Examples
1372+
--------
1373+
>>> from sklearn.linear_model import LassoCV
1374+
>>> from sklearn.datasets import make_regression
1375+
>>> X, y = make_regression(noise=4, random_state=0)
1376+
>>> reg = LassoCV(cv=5, random_state=0).fit(X, y)
1377+
>>> reg.score(X, y) # doctest: +ELLIPSIS
1378+
0.9993...
1379+
>>> reg.predict(X[:1,])
1380+
array([-78.4951...])
1381+
13711382
Notes
13721383
-----
13731384
For an example, see
@@ -2235,6 +2246,19 @@ class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
22352246
number of iterations run by the coordinate descent solver to reach
22362247
the specified tolerance for the optimal alpha.
22372248
2249+
Examples
2250+
--------
2251+
>>> from sklearn.linear_model import MultiTaskLassoCV
2252+
>>> from sklearn.datasets import make_regression
2253+
>>> X, y = make_regression(n_targets=2, noise=4, random_state=0)
2254+
>>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y)
2255+
>>> reg.score(X, y) # doctest: +ELLIPSIS
2256+
0.9994...
2257+
>>> reg.alpha_
2258+
0.5713...
2259+
>>> reg.predict(X[:1,])
2260+
array([[153.7971..., 94.9015...]])
2261+
22382262
See also
22392263
--------
22402264
MultiTaskElasticNet

sklearn/linear_model/huber.py

Lines changed: 23 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -192,6 +192,29 @@ class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
192192
A boolean mask which is set to True where the samples are identified
193193
as outliers.
194194
195+
Examples
196+
--------
197+
>>> import numpy as np
198+
>>> from sklearn.linear_model import HuberRegressor, LinearRegression
199+
>>> from sklearn.datasets import make_regression
200+
>>> np.random.seed(0)
201+
>>> X, y, coef = make_regression(
202+
... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
203+
>>> X[:4] = np.random.uniform(10, 20, (4, 2))
204+
>>> y[:4] = np.random.uniform(10, 20, 4)
205+
>>> huber = HuberRegressor().fit(X, y)
206+
>>> huber.score(X, y) # doctest: +ELLIPSIS
207+
-7.284608623514573
208+
>>> huber.predict(X[:1,])
209+
array([806.7200...])
210+
>>> linear = LinearRegression().fit(X, y)
211+
>>> print("True coefficients:", coef)
212+
True coefficients: [20.4923... 34.1698...]
213+
>>> print("Huber coefficients:", huber.coef_)
214+
Huber coefficients: [17.7906... 31.0106...]
215+
>>> print("Linear Regression coefficients:", linear.coef_)
216+
Linear Regression coefficients: [-1.9221... 7.0226...]
217+
195218
References
196219
----------
197220
.. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics

sklearn/linear_model/least_angle.py

Lines changed: 26 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -1070,6 +1070,19 @@ class LarsCV(Lars):
10701070
n_iter_ : array-like or int
10711071
the number of iterations run by Lars with the optimal alpha.
10721072
1073+
Examples
1074+
--------
1075+
>>> from sklearn.linear_model import LarsCV
1076+
>>> from sklearn.datasets import make_regression
1077+
>>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
1078+
>>> reg = LarsCV(cv=5).fit(X, y)
1079+
>>> reg.score(X, y) # doctest: +ELLIPSIS
1080+
0.9996...
1081+
>>> reg.alpha_
1082+
0.0254...
1083+
>>> reg.predict(X[:1,])
1084+
array([154.0842...])
1085+
10731086
See also
10741087
--------
10751088
lars_path, LassoLars, LassoLarsCV
@@ -1290,6 +1303,19 @@ class LassoLarsCV(LarsCV):
12901303
n_iter_ : array-like or int
12911304
the number of iterations run by Lars with the optimal alpha.
12921305
1306+
Examples
1307+
--------
1308+
>>> from sklearn.linear_model import LassoLarsCV
1309+
>>> from sklearn.datasets import make_regression
1310+
>>> X, y = make_regression(noise=4.0, random_state=0)
1311+
>>> reg = LassoLarsCV(cv=5).fit(X, y)
1312+
>>> reg.score(X, y) # doctest: +ELLIPSIS
1313+
0.9992...
1314+
>>> reg.alpha_
1315+
0.0484...
1316+
>>> reg.predict(X[:1,])
1317+
array([-77.8723...])
1318+
12931319
Notes
12941320
-----
12951321

sklearn/linear_model/omp.py

Lines changed: 25 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -583,6 +583,17 @@ class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
583583
n_iter_ : int or array-like
584584
Number of active features across every target.
585585
586+
Examples
587+
--------
588+
>>> from sklearn.linear_model import OrthogonalMatchingPursuit
589+
>>> from sklearn.datasets import make_regression
590+
>>> X, y = make_regression(noise=4, random_state=0)
591+
>>> reg = OrthogonalMatchingPursuit().fit(X, y)
592+
>>> reg.score(X, y) # doctest: +ELLIPSIS
593+
0.9991...
594+
>>> reg.predict(X[:1,])
595+
array([-78.3854...])
596+
586597
Notes
587598
-----
588599
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
@@ -814,6 +825,20 @@ class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
814825
Number of active features across every target for the model refit with
815826
the best hyperparameters got by cross-validating across all folds.
816827
828+
Examples
829+
--------
830+
>>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
831+
>>> from sklearn.datasets import make_regression
832+
>>> X, y = make_regression(n_features=100, n_informative=10,
833+
... noise=4, random_state=0)
834+
>>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
835+
>>> reg.score(X, y) # doctest: +ELLIPSIS
836+
0.9991...
837+
>>> reg.n_nonzero_coefs_
838+
10
839+
>>> reg.predict(X[:1,])
840+
array([-78.3854...])
841+
817842
See also
818843
--------
819844
orthogonal_mp

sklearn/linear_model/ransac.py

Lines changed: 12 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -186,6 +186,18 @@ class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
186186
187187
.. versionadded:: 0.19
188188
189+
Examples
190+
--------
191+
>>> from sklearn.linear_model import RANSACRegressor
192+
>>> from sklearn.datasets import make_regression
193+
>>> X, y = make_regression(
194+
... n_samples=200, n_features=2, noise=4.0, random_state=0)
195+
>>> reg = RANSACRegressor(random_state=0).fit(X, y)
196+
>>> reg.score(X, y) # doctest: +ELLIPSIS
197+
0.9885...
198+
>>> reg.predict(X[:1,])
199+
array([-31.9417...])
200+
189201
References
190202
----------
191203
.. [1] https://en.wikipedia.org/wiki/RANSAC

sklearn/linear_model/theil_sen.py

Lines changed: 12 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -276,6 +276,18 @@ class TheilSenRegressor(LinearModel, RegressorMixin):
276276
Number of combinations taken into account from 'n choose k', where n is
277277
the number of samples and k is the number of subsamples.
278278
279+
Examples
280+
--------
281+
>>> from sklearn.linear_model import TheilSenRegressor
282+
>>> from sklearn.datasets import make_regression
283+
>>> X, y = make_regression(
284+
... n_samples=200, n_features=2, noise=4.0, random_state=0)
285+
>>> reg = TheilSenRegressor(random_state=0).fit(X, y)
286+
>>> reg.score(X, y) # doctest: +ELLIPSIS
287+
0.9884...
288+
>>> reg.predict(X[:1,])
289+
array([-31.5871...])
290+
279291
References
280292
----------
281293
- Theil-Sen Estimators in a Multiple Linear Regression Model, 2009

0 commit comments

Comments (0)