|
13 | 13 | from sklearn.gaussian_process import GaussianProcess |
14 | 14 | from sklearn.gaussian_process import regression_models as regression |
15 | 15 | from sklearn.gaussian_process import correlation_models as correlation |
| 16 | +from sklearn.utils.testing import assert_greater |
16 | 17 |
|
17 | 18 |
|
def f(x):
    """Target function x * sin(x) used by the GP tests in this file.

    Accepts a scalar or a NumPy array (``np.sin`` broadcasts elementwise).
    Defined as a ``def`` rather than a lambda assignment (PEP 8 E731) so it
    has a proper ``__name__`` and docstring; behavior is unchanged.
    """
    return x * np.sin(x)
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the reduced likelihood function of the optimal theta.
    """
    n_samples, n_features = 50, 3
    # All randomness is driven by this explicit RandomState plus the
    # random_state=0 passed to GaussianProcess below, so no global
    # np.random.seed(...) call is needed (the previous one was redundant).
    rng = np.random.RandomState(0)
    # NOTE(review): randn(...) * 2 - 1 draws Gaussian inputs (mean -1,
    # std 2); the affine transform suggests uniform [-1, 1) may have been
    # intended (rng.rand instead of rng.randn) — confirm.
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
    best_likelihood = -np.inf
    for random_start in range(1, 5):
        gp = GaussianProcess(regr="constant", corr="squared_exponential",
                             theta0=[1e-0] * n_features,
                             thetaL=[1e-4] * n_features,
                             thetaU=[1e+1] * n_features,
                             random_start=random_start, random_state=0,
                             verbose=False).fit(X, y)
        rlf = gp.reduced_likelihood_function()[0]
        # Allow float32-eps slack: more starts can only help, but equal
        # optima reached from different starts may differ by rounding noise.
        assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
        best_likelihood = rlf
0 commit comments