| 1 | +"""Author: Arthur Mensch |
| 2 | +
|
| 3 | +Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain |
| 4 | +in using multinomial logistic regression in term of learning time. |
| 5 | +""" |
import json
import time
from os.path import expanduser

import matplotlib.pyplot as plt
import numpy as np

from joblib import delayed, Parallel, Memory
from sklearn.datasets import (fetch_rcv1, load_iris, load_digits,
                              fetch_20newsgroups_vectorized)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax


def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
               max_iter=10, skip_slow=False):
    if skip_slow and solver == 'lightning' and penalty == 'l1':
        print('Skipping l1 logistic regression with solver lightning.')
        return

    print('Solving %s logistic regression with penalty %s, solver %s.'
          % ('binary' if single_target else 'multinomial',
             penalty, solver))

    if solver == 'lightning':
        from lightning.classification import SAGAClassifier

    if single_target or solver not in ['sag', 'saga']:
        multi_class = 'ovr'
    else:
        multi_class = 'multinomial'

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
                                                        stratify=y)
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
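    # Seed the learning curves at t=0 with chance-level values: a placeholder
    # loss of 1 and an accuracy of 1 / n_classes.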
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]

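    # Map scikit-learn's C (inverse regularization strength) onto the
    # per-sample alpha (squared-l2) and beta (l1) penalties expected by
    # lightning's SAGAClassifier, so both libraries optimize the same
    # objective.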
    if penalty == 'l2':
        alpha = 1. / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        alpha = 0.
        beta = 1. / (C * n_samples)
        lightning_penalty = 'l1'

    for this_max_iter in range(1, max_iter + 1, 2):
        print('[%s, %s, %s] Max iter: %s' %
              ('binary' if single_target else 'multinomial',
               penalty, solver, this_max_iter))
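        # The tolerance-based stopping criterion is effectively disabled
        # (tol=-1 for lightning, a tiny tol for sklearn) so each fit runs
        # for exactly `this_max_iter` epochs and timings stay comparable.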
        if solver == 'lightning':
            lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
                                penalty=lightning_penalty,
                                tol=-1, max_iter=this_max_iter)
        else:
            lr = LogisticRegression(solver=solver,
                                    multi_class=multi_class,
                                    C=C,
                                    penalty=penalty,
                                    fit_intercept=False, tol=1e-24,
                                    max_iter=this_max_iter,
                                    random_state=42,
                                    )
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0

        scores = []
        for (X, y) in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # Lightning predict_proba is not implemented for n_classes > 2
                y_pred = _predict_proba(lr, X)
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
                      beta * np.sum(np.abs(lr.coef_)))
            scores.append(score)
        train_score, test_score = tuple(scores)

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies


def _predict_proba(lr, X):
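    # Fallback for estimators without a multiclass predict_proba (lightning's
    # SAGAClassifier): apply a softmax to the linear decision scores, i.e.
    # the multinomial logistic probability model.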
    pred = safe_sparse_dot(X, lr.coef_.T)
    if hasattr(lr, "intercept_"):
        pred += lr.intercept_
    return softmax(pred)


def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
        dataset='rcv1', n_jobs=1, skip_slow=False):
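    # Cache fitted models on disk so that re-running the benchmark with
    # identical parameters is free; assumes ~/cache is writable.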
    mem = Memory(location=expanduser('~/cache'), verbose=0)

    if dataset == 'rcv1':
        rcv1 = fetch_rcv1()

        lbin = LabelBinarizer()
        lbin.fit(rcv1.target_names)

        X = rcv1.data
        y = rcv1.target
        y = lbin.inverse_transform(y)
        le = LabelEncoder()
        y = le.fit_transform(y)
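        # Collapse the multiclass problem into a binary one by thresholding
        # the encoded class index.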
        if single_target:
            y_n = y.copy()
            y_n[y > 16] = 1
            y_n[y <= 16] = 0
            y = y_n

    elif dataset == 'digits':
        digits = load_digits()
        X, y = digits.data, digits.target
        if single_target:
            y_n = y.copy()
            y_n[y < 5] = 1
            y_n[y >= 5] = 0
            y = y_n
    elif dataset == 'iris':
        iris = load_iris()
        X, y = iris.data, iris.target
    elif dataset == '20newspaper':
        ng = fetch_20newsgroups_vectorized()
        X = ng.data
        y = ng.target
        if single_target:
            y_n = y.copy()
            y_n[y > 4] = 1
            y_n[y <= 4] = 0
            y = y_n

    X = X[:n_samples]
    y = y[:n_samples]

    cached_fit = mem.cache(fit_single)
    out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
        delayed(cached_fit)(solver, X, y,
                            penalty=penalty, single_target=single_target,
                            C=1, max_iter=max_iter, skip_slow=skip_slow)
        for solver in solvers
        for penalty in penalties)

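    # `out` holds one entry per (solver, penalty) pair, in dispatch order;
    # a skipped (lightning, l1) run contributes a None entry, so the index
    # below must advance for every pair.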
    res = []
    idx = 0
    for solver in solvers:
        for penalty in penalties:
            if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
                lr, times, train_scores, test_scores, accuracies = out[idx]
                this_res = dict(solver=solver, penalty=penalty,
                                single_target=single_target,
                                times=times, train_scores=train_scores,
                                test_scores=test_scores,
                                accuracies=accuracies)
                res.append(this_res)
            idx += 1

    with open('bench_saga.json', 'w+') as f:
        json.dump(res, f)


def plot():
    import pandas as pd
    with open('bench_saga.json', 'r') as f:
        res = json.load(f)
    res = pd.DataFrame(res)
    res.set_index(['single_target', 'penalty'], inplace=True)

    grouped = res.groupby(level=['single_target', 'penalty'])

    colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}

    for idx, group in grouped:
        single_target, penalty = idx
        fig = plt.figure(figsize=(12, 4))
        ax = fig.add_subplot(131)

        train_scores = group['train_scores'].values
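        # Reference value slightly below the best objective reached by any
        # solver, so the relative suboptimality stays positive on a log scale.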
        ref = np.min(np.concatenate(train_scores)) * 0.999

        for scores, times, solver in zip(group['train_scores'], group['times'],
                                         group['solver']):
            scores = np.asarray(scores) / ref - 1
            ax.plot(times, scores, label=solver, color=colors[solver])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Training objective (relative to min)')
        ax.set_yscale('log')

        ax = fig.add_subplot(132)

        test_scores = group['test_scores'].values
        ref = np.min(np.concatenate(test_scores)) * 0.999

        for scores, times, solver in zip(group['test_scores'], group['times'],
                                         group['solver']):
            scores = np.asarray(scores) / ref - 1
            ax.plot(times, scores, label=solver, color=colors[solver])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Test objective (relative to min)')
        ax.set_yscale('log')

        ax = fig.add_subplot(133)

        for accuracy, times, solver in zip(group['accuracies'], group['times'],
                                           group['solver']):
            ax.plot(times, accuracy, label=solver, color=colors[solver])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Test accuracy')
        ax.legend()
        name = 'single_target' if single_target else 'multi_target'
        name += '_%s' % penalty
        plt.suptitle(name)
        name += '.png'
        fig.tight_layout()
        fig.subplots_adjust(top=0.9)
        plt.savefig(name)
        plt.close(fig)


if __name__ == '__main__':
    solvers = ['saga', 'liblinear', 'lightning']
    penalties = ['l1', 'l2']
    single_target = True
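    # n_samples=None keeps the full dataset (slicing with [:None] is a no-op).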
    exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
        dataset='20newspaper', max_iter=20)
    plot()