Commit a1d4d18

COSMIT pep8
1 parent 364db7a commit a1d4d18

File tree

19 files changed: +39 -37 lines changed

sklearn/cluster/spectral.py

Lines changed: 1 addition & 3 deletions

@@ -288,7 +288,7 @@ class SpectralClustering(BaseEstimator, ClusterMixin):
     If affinity is the adjacency matrix of a graph, this method can be
     used to find normalized graph cuts.

-    When calling ``fit``, an affinity matrix is constructed using either
+    When calling ``fit``, an affinity matrix is constructed using either
     kernel function such the Gaussian (aka RBF) kernel of the euclidean
     distanced ``d(X, X)``::

@@ -486,5 +486,3 @@ def mode(self):
             " 0.15.")
     def k(self):
         return self.n_clusters
-
-
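The docstring touched here documents how SpectralClustering builds its affinity matrix when ``fit`` is called. A minimal usage sketch, assuming era-appropriate defaults (the data and parameter values below are illustrative, not part of the commit):

    import numpy as np
    from sklearn.cluster import SpectralClustering

    X = np.random.RandomState(0).rand(20, 2)
    # fit() builds an RBF affinity from the euclidean distances d(X, X)
    labels = SpectralClustering(n_clusters=2, affinity='rbf').fit(X).labels_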

sklearn/datasets/lfw.py

Lines changed: 1 addition & 1 deletion

@@ -319,7 +319,7 @@ def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
         try:
             person_folder = join(data_folder_path, name)
         except TypeError:
-            person_folder = join(data_folder_path, str(name,'UTF-8'))
+            person_folder = join(data_folder_path, str(name, 'UTF-8'))
         filenames = list(sorted(listdir(person_folder)))
         file_path = join(person_folder, filenames[idx])
         file_paths.append(file_path)
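For context on the corrected call: on Python 3, the two-argument form of ``str`` decodes bytes, which is what this fallback relies on when ``listdir`` returns bytes. A minimal sketch (the name below is hypothetical):

    # str(bytes, encoding) decodes to text on Python 3,
    # equivalent to bytes.decode(encoding)
    name = b'Some_Name'
    assert str(name, 'UTF-8') == name.decode('UTF-8') == 'Some_Name'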

sklearn/datasets/svmlight_format.py

Lines changed: 3 additions & 2 deletions

@@ -57,7 +57,7 @@ def load_svmlight_file(f, n_features=None, dtype=np.float64,
     when using pairwise loss functions (as is the case in some
     learning to rank problems) so that only pairs with the same
     query_id value are considered.
-
+
     This implementation is written in Cython and is reasonably fast.
     However, a faster API-compatible loader is also available at:

@@ -246,7 +246,8 @@ def _dump_svmlight(X, y, f, one_based, comment, query_id):
     if comment:
         f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                   % __version__))
-        f.write(b("# Column indices are %s-based\n" % ["zero", "one"][one_based]))
+        f.write(b("# Column indices are %s-based\n"
+                  % ["zero", "one"][one_based]))

         f.write(b("#\n"))
         f.writelines(b("# %s\n" % line) for line in comment.splitlines())
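The rewrapped line writes part of the header that ``dump_svmlight_file`` emits when a comment is supplied. A hedged usage sketch (the file path and data are illustrative):

    import numpy as np
    from sklearn.datasets import dump_svmlight_file

    X = np.array([[1.0, 0.0], [0.0, 2.0]])
    y = np.array([0, 1])
    # passing a comment triggers the header block, including the
    # "# Column indices are zero-based" line formatted above
    dump_svmlight_file(X, y, '/tmp/example.svmlight', comment='demo file')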

sklearn/datasets/tests/test_lfw.py

Lines changed: 1 addition & 1 deletion

@@ -94,7 +94,7 @@ def setup_module():
         first_index = random_state.choice(np.arange(counts[first_name]))
         second_index = random_state.choice(np.arange(counts[second_name]))
         f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
-                                           second_name, second_index)))
+                                            second_name, second_index)))

     with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
         f.write(six.b("Fake place holder that won't be tested"))
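``six.b``, used above, turns a native string literal into bytes so the fixture files can be written in binary mode. A quick sketch:

    import six

    # a no-op on Python 2; latin-1-encodes the literal on Python 3
    assert six.b('%s\t%d\n' % ('name', 0)) == b'name\t0\n'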

sklearn/datasets/tests/test_mldata.py

Lines changed: 1 addition & 2 deletions

@@ -128,8 +128,7 @@ def test_fetch_multiple_column():
     dataname = 'threecol-order'
     datasets.mldata.urlopen = mock_mldata_urlopen({
         dataname: ({'y': y, 'x': x, 'z': z},
-                   ['y', 'x', 'z']),
-        })
+                   ['y', 'x', 'z']), })

     dset = fetch_mldata(dataname, data_home=tmpdir)
     for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:

sklearn/decomposition/tests/test_kernel_pca.py

Lines changed: 0 additions & 1 deletion

@@ -126,7 +126,6 @@ def test_remove_zero_eig():
     assert_equal(Xt.shape, (3, 0))


-
 def test_kernel_pca_precomputed():
     rng = np.random.RandomState(0)
     X_fit = rng.random_sample((5, 4))

sklearn/ensemble/partial_dependence.py

Lines changed: 1 addition & 1 deletion

@@ -335,7 +335,7 @@ def convert_feature(fx):
     n_rows = int(np.ceil(len(features) / float(n_cols)))
     axs = []
     for i, fx, name, (pdp, axes) in zip(count(), features, names,
-                                       pd_result):
+                                        pd_result):
         ax = fig.add_subplot(n_rows, n_cols, i + 1)

         if len(axes) == 1:
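The rewrapped loop pairs a running index with several parallel sequences via ``zip(count(), ...)``, which behaves like ``enumerate`` over a ``zip``. A quick standalone sketch:

    from itertools import count

    names = ['f0', 'f1']
    results = ['a', 'b']
    # zip() stops at the shortest iterable, so the infinite count() is safe
    assert list(zip(count(), names, results)) == [(0, 'f0', 'a'), (1, 'f1', 'b')]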

sklearn/feature_extraction/dict_vectorizer.py

Lines changed: 1 addition & 1 deletion

@@ -2,7 +2,7 @@
 # License: BSD-style.

 from array import array
-from collections import Mapping, Sequence
+from collections import Mapping
 from operator import itemgetter

 import numpy as np

sklearn/feature_extraction/text.py

Lines changed: 7 additions & 5 deletions

@@ -717,15 +717,17 @@ def fit_transform(self, raw_documents, y=None):
         max_df = self.max_df
         min_df = self.min_df

-        max_doc_count = (max_df if isinstance(max_df, numbers.Integral)
-                         else max_df * n_doc)
-        min_doc_count = (min_df if isinstance(min_df, numbers.Integral)
-                         else min_df * n_doc)
+        max_doc_count = (max_df
+                         if isinstance(max_df, numbers.Integral)
+                         else max_df * n_doc)
+        min_doc_count = (min_df
+                         if isinstance(min_df, numbers.Integral)
+                         else min_df * n_doc)

         # filter out stop words: terms that occur in almost all documents
         if max_doc_count < n_doc or min_doc_count > 1:
             stop_words = set(t for t, dc in six.iteritems(document_counts)
-                             if not min_doc_count <= dc <= max_doc_count)
+                                 if not min_doc_count <= dc <= max_doc_count)
         else:
             stop_words = set()
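The reformatted expressions implement the ``max_df``/``min_df`` convention: an integral value is treated as an absolute document count, a float as a proportion of the ``n_doc`` documents. A standalone sketch of the same logic (the helper name is hypothetical):

    import numbers

    def doc_count_threshold(df, n_doc):
        # mirrors the conditional expressions in the diff above
        return df if isinstance(df, numbers.Integral) else df * n_doc

    assert doc_count_threshold(2, 10) == 2      # absolute count
    assert doc_count_threshold(0.5, 10) == 5.0  # fraction of 10 documents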

sklearn/feature_selection/selector_mixin.py

Lines changed: 2 additions & 1 deletion

@@ -97,7 +97,8 @@ def transform(self, X, threshold=None):
         try:
             mask = importances >= threshold
         except TypeError:
-            # Fails in Python 3.x when threshold is str; result is array of True
+            # Fails in Python 3.x when threshold is str;
+            # result is array of True
             raise ValueError("Invalid threshold: all features are discarded.")

         if np.any(mask):
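The rewrapped comment describes the elementwise comparison failing for string thresholds. A hedged reproduction (exact behavior depends on the Python/NumPy version; recent NumPy raises TypeError for ordering comparisons against strings, which is what the except clause catches):

    import numpy as np

    importances = np.array([0.1, 0.4, 0.5])
    try:
        mask = importances >= 'mean'  # a str threshold reaches the comparison
    except TypeError:
        # transform() turns this into a more helpful error
        raise ValueError("Invalid threshold: all features are discarded.")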
