Skip to content

Commit 4ff7e8e

Browse files
taehoonlee authored and jnothman committed
Fix typos (scikit-learn#9476)
1 parent 00c0f7b commit 4ff7e8e

File tree

7 files changed

+8
-8
lines changed

7 files changed

+8
-8
lines changed

sklearn/ensemble/gradient_boosting.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -448,7 +448,7 @@ class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
448448
def _score_to_proba(self, score):
449449
"""Template method to convert scores to probabilities.
450450
451-
the does not support probabilites raises AttributeError.
451+
the does not support probabilities raises AttributeError.
452452
"""
453453
raise TypeError('%s does not support predict_proba' % type(self).__name__)
454454

sklearn/ensemble/tests/test_base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ def make_steps():
109109
assert_not_equal(est1.get_params()['sel__estimator__random_state'],
110110
est1.get_params()['clf__random_state'])
111111

112-
# ensure multiple random_state paramaters are invariant to get_params()
112+
# ensure multiple random_state parameters are invariant to get_params()
113113
# iteration order
114114

115115
class AlphaParamPipeline(Pipeline):

sklearn/linear_model/tests/test_logistic.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -986,7 +986,7 @@ def test_logreg_predict_proba_multinomial():
986986
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
987987
n_classes=3, n_informative=10)
988988

989-
# Predicted probabilites using the true-entropy loss should give a
989+
# Predicted probabilities using the true-entropy loss should give a
990990
# smaller loss than those using the ovr method.
991991
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
992992
clf_multi.fit(X, y)
@@ -996,7 +996,7 @@ def test_logreg_predict_proba_multinomial():
996996
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
997997
assert_greater(clf_ovr_loss, clf_multi_loss)
998998

999-
# Predicted probabilites using the soft-max function should give a
999+
# Predicted probabilities using the soft-max function should give a
10001000
# smaller loss than those using the logistic function.
10011001
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
10021002
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))

sklearn/metrics/ranking.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -840,7 +840,7 @@ def ndcg_score(y_true, y_score, k=5):
840840
"""
841841
y_score, y_true = check_X_y(y_score, y_true)
842842

843-
# Make sure we use all the labels (max between the lenght and the higher
843+
# Make sure we use all the labels (max between the length and the higher
844844
# number in the array)
845845
lb = LabelBinarizer()
846846
lb.fit(np.arange(max(np.max(y_true) + 1, len(y_true))))

sklearn/mixture/dpgmm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def gammaln(x):
4747
@deprecated("The function log_normalize is deprecated in 0.18 and "
4848
"will be removed in 0.20.")
4949
def log_normalize(v, axis=0):
50-
"""Normalized probabilities from unnormalized log-probabilites"""
50+
"""Normalized probabilities from unnormalized log-probabilities"""
5151
v = np.rollaxis(v, axis)
5252
v = v.copy()
5353
v -= v.max(axis=0)

sklearn/multioutput.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -316,7 +316,7 @@ def __init__(self, estimator, n_jobs=1):
316316

317317
def predict_proba(self, X):
318318
"""Probability estimates.
319-
Returns prediction probabilites for each class of each output.
319+
Returns prediction probabilities for each class of each output.
320320
321321
Parameters
322322
----------

sklearn/utils/random.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ def random_choice_csc(n_samples, classes, class_probability=None,
184184
random_state=random_state)
185185
indices.extend(ind_sample)
186186

187-
# Normalize probabilites for the nonzero elements
187+
# Normalize probabilities for the nonzero elements
188188
classes_j_nonzero = classes[j] != 0
189189
class_probability_nz = class_prob_j[classes_j_nonzero]
190190
class_probability_nz_norm = (class_probability_nz /

0 commit comments

Comments
 (0)