@@ -314,7 +314,7 @@ def average_precision_score(y_true, y_score, average="macro",
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary indicator format.
+        True binary labels in binary label indicators.
 
     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
@@ -426,7 +426,7 @@ def _average_binary_score(binary_metric, y_true, y_score, average,
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary indicator format.
+        True binary labels in binary label indicators.
 
     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
@@ -527,7 +527,7 @@ def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary indicator format.
+        True binary labels in binary label indicators.
 
     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
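These three docstrings share the same `y_true`/`y_score` contract. For reference, the binary usage they describe looks like the following (a minimal sketch, separate from the diff itself; the toy scores are illustrative):

>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)    # 3 of 4 positive/negative pairs ranked correctly
0.75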
@@ -986,10 +986,10 @@ def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.
 
     normalize : bool, optional (default=True)
@@ -1025,17 +1025,10 @@ def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
     >>> zero_one_loss(y_true, y_pred, normalize=False)
     1
 
-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:
 
-    >>> zero_one_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
     0.5
-
-    and with a list of labels format:
-
-    >>> zero_one_loss([(1, ), (3, )], [(1, 2), tuple()])
-    1.0
-
-
     """
     score = accuracy_score(y_true, y_pred,
                            normalize=normalize,
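The deleted list-of-labels doctest has an exact indicator-format equivalent: each label set becomes a row of 0/1 membership flags, so nothing is lost by the format change. A migration sketch (assuming MultiLabelBinarizer from sklearn.preprocessing, which ships in the same release series that drops the list-of-labels format; the variable names are illustrative):

>>> import numpy as np
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> from sklearn.metrics import zero_one_loss
>>> mlb = MultiLabelBinarizer(classes=[1, 2, 3])
>>> Y_true = mlb.fit_transform([(1,), (3,)])    # rows for the label sets {1}, {3}
>>> Y_pred = mlb.transform([(1, 2), ()])        # rows for {1, 2} and the empty set
>>> zero_one_loss(Y_true, Y_pred)               # both rows differ somewhere
1.0

This reproduces the removed doctest's value of 1.0.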
@@ -1064,7 +1057,7 @@ def log_loss(y_true, y_pred, eps=1e-15, normalize=True):
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels for n_samples samples.
 
     y_pred : array-like of float, shape = (n_samples, n_classes)
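Here `y_pred` must hold probabilities, with columns ordered by the sorted label set ("ham" before "spam" below). A small sketch; the expected value is the mean of -log(probability assigned to the true class), hand-checked for this input:

>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])  # doctest: +ELLIPSIS
0.21616...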
@@ -1139,10 +1132,10 @@ def jaccard_similarity_score(y_true, y_pred, normalize=True):
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.
 
     normalize : bool, optional (default=True)
@@ -1187,17 +1180,11 @@ def jaccard_similarity_score(y_true, y_pred, normalize=True):
     >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
     2
 
-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:
 
-    >>> jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]),\
+    >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
         np.ones((2, 2)))
     0.75
-
-    and with a list of labels format:
-
-    >>> jaccard_similarity_score([(1, ), (3, )], [(1, 2), tuple()])
-    0.25
-
     """
 
     # Compute accuracy for each possible representation
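The 0.75 in the updated doctest is the mean per-sample Jaccard coefficient, |true ∩ pred| / |true ∪ pred|. A hand check with plain numpy (a sketch, not the library's internals):

>>> import numpy as np
>>> y_true = np.array([[0, 1], [1, 1]])
>>> y_pred = np.ones((2, 2))
>>> intersection = np.logical_and(y_true, y_pred).sum(axis=1)
>>> union = np.logical_or(y_true, y_pred).sum(axis=1)
>>> (intersection / union.astype(float)).mean()   # (1/2 + 2/2) / 2
0.75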
@@ -1252,10 +1239,10 @@ def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.
 
     normalize : bool, optional (default=True)
@@ -1295,16 +1282,10 @@ def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
     >>> accuracy_score(y_true, y_pred, normalize=False)
     2
 
-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:
 
-    >>> accuracy_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
     0.5
-
-    and with a list of labels format:
-
-    >>> accuracy_score([(1, ), (3, )], [(1, 2), tuple()])
-    0.0
-
     """
 
     # Compute accuracy for each possible representation
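In the indicator case, accuracy_score is subset accuracy: a sample counts only if its entire label row matches, which is why the doctest yields 0.5 (and why zero_one_loss above is exactly its complement). An equivalent numpy check (a sketch, not the library's internals):

>>> import numpy as np
>>> y_true = np.array([[0, 1], [1, 1]])
>>> y_pred = np.ones((2, 2))
>>> np.all(y_true == y_pred, axis=1).mean()   # only the second row matches
0.5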
@@ -1343,10 +1324,10 @@ def f1_score(y_true, y_pred, labels=None, pos_label=1, average='weighted',
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.
 
     labels : array
@@ -1426,10 +1407,10 @@ def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.
 
     beta: float
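beta trades recall against precision: beta < 1 favours precision, beta > 1 favours recall, and f1_score is the beta=1 special case. A macro-averaged check on toy data (the input and the expected value are hand-computed, not taken from the diff):

>>> import numpy as np
>>> from sklearn.metrics import fbeta_score
>>> y_true = np.array([0, 1, 2, 0, 1, 2])
>>> y_pred = np.array([0, 2, 1, 0, 0, 1])
>>> fbeta_score(y_true, y_pred, beta=0.5, average='macro')  # doctest: +ELLIPSIS
0.23809...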
@@ -1585,10 +1566,10 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.
 
     beta : float, 1.0 by default
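precision_score, recall_score, f1_score and fbeta_score all reduce to this function. Called directly it returns four per-class arrays, or scalars when an average is requested (the support slot is then None). A sketch on the same toy data as above, with hand-computed expectations:

>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array([0, 1, 2, 0, 1, 2])
>>> y_pred = np.array([0, 2, 1, 0, 0, 1])
>>> p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average='macro')
>>> p, r, f    # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26...)
>>> s is None    # support is not averaged
True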
@@ -1830,10 +1811,10 @@ def precision_score(y_true, y_pred, labels=None, pos_label=1,
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.
 
     labels : array
@@ -1912,10 +1893,10 @@ def recall_score(y_true, y_pred, labels=None, pos_label=1, average='weighted',
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.
 
     labels : array
@@ -1987,10 +1968,10 @@ def classification_report(y_true, y_pred, labels=None, target_names=None,
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.
 
     labels : array, shape = [n_labels]
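classification_report renders those per-class numbers as text. The exact column layout varies across releases, so this sketch skips the expected output (the labels and target_names are illustrative):

>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> print(classification_report(y_true, y_pred,
...       target_names=['class 0', 'class 1', 'class 2']))  # doctest: +SKIP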
@@ -2081,10 +2062,10 @@ def hamming_loss(y_true, y_pred, classes=None):
 
     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.
 
-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.
 
     classes : array, shape = [n_labels], optional
@@ -2132,16 +2113,10 @@ def hamming_loss(y_true, y_pred, classes=None):
     >>> hamming_loss(y_true, y_pred)
     0.25
 
-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:
 
-    >>> hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.zeros((2, 2)))
+    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
     0.75
-
-    and with a list of labels format:
-
-    >>> hamming_loss([(1, 2), (3, )], [(1, 2), tuple()])  # doctest: +ELLIPSIS
-    0.166...
-
     """
     y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)