Skip to content

Commit c4d0909

Browse files
Exercise5 - Week6 Finished
1 parent a080069 commit c4d0909

File tree

4 files changed

+25
-18
lines changed

4 files changed

+25
-18
lines changed

machine-learning-ex5/ex5/learningCurve.m

Lines changed: 7 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -53,7 +53,13 @@
5353

5454
% ---------------------- Sample Solution ----------------------
5555

56-
56+
% Learning curve: for each training-set size 1..m, fit on the first
% `last` examples and record the (unregularized) train and CV errors.
for last = 1:m
    % Train only on the first `last` examples.
    X_train = X(1:last, :);
    y_train = y(1:last);
    theta = trainLinearReg(X_train, y_train, lambda);

    % Errors are reported without regularization (lambda = 0) so that
    % train and validation errors are directly comparable.
    % NOTE: the original used `[error_train(last),] = ...`; the trailing
    % comma in the output list is a parse error in MATLAB — assign the
    % single (cost) output directly instead.
    error_train(last) = linearRegCostFunction(X_train, y_train, theta, 0);
    error_val(last) = linearRegCostFunction(Xval, yval, theta, 0);
end
5763

5864

5965

machine-learning-ex5/ex5/linearRegCostFunction.m

Lines changed: 5 additions & 11 deletions
Original file line number · Diff line number · Diff line change
@@ -7,7 +7,8 @@
77

88
% Initialize some useful values
99
m = length(y); % number of training examples
10-
10+
% Regularization must not penalize the bias term, so keep a copy of
% theta whose first entry is zeroed out for use in the penalty terms.
theta_temp = theta;
theta_temp(1) = 0;

% Return variables, initialized to safe defaults.
J = 0;
grad = zeros(size(theta));
@@ -18,16 +19,9 @@
1819
%
1920
% You should set J to the cost and grad to the gradient.
2021
%
21-
22-
23-
24-
25-
26-
27-
28-
29-
30-
22+
% Predictions of the linear model for every training example.
h = X * theta;
residual = h - y;

% Regularized cost: mean squared error plus an L2 penalty on
% theta_temp (theta with its bias entry zeroed, so the bias is
% never penalized).
sq_err = sum(residual .^ 2);
J = sq_err / (2 * m) + lambda * sum(theta_temp .^ 2) / (2 * m);

% Regularized gradient of the cost with respect to theta.
grad = (X' * residual) / m + lambda * theta_temp / m;
3125

3226

3327
% =========================================================================

machine-learning-ex5/ex5/polyFeatures.m

Lines changed: 3 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -15,7 +15,9 @@
1515
%
1616
%
1717

18-
18+
% Polynomial feature mapping: column `degree` of X_poly holds the
% original feature raised to that power (degrees 1 through p).
for degree = 1:p
    X_poly(:, degree) = X .^ degree;
end
1921

2022

2123

machine-learning-ex5/ex5/validationCurve.m

Lines changed: 10 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -11,10 +11,11 @@
1111

1212
% Selected values of lambda (you should not change this)
1313
lambda_vec = [0 0.001 0.003 0.01 0.03 0.1 0.3 1 3 10]';
14-
14+
% Number of candidate regularization strengths to evaluate.
n_lambda = length(lambda_vec);

% Preallocate one training error and one validation error per lambda.
error_train = zeros(n_lambda, 1);
error_val = zeros(n_lambda, 1);
1819

1920
% ====================== YOUR CODE HERE ======================
2021
% Instructions: Fill in this function to return training errors in
@@ -39,8 +40,12 @@
3940
%
4041
%
4142

42-
43-
43+
% Sweep the candidate lambdas: train WITH regularization, then report
% both errors WITHOUT regularization (lambda = 0) so the error metric
% is comparable across different regularization strengths.
for i = 1:n_lambda
    lambda = lambda_vec(i);
    theta = trainLinearReg(X, y, lambda);

    % NOTE: the original used `[error_train(i),] = ...`; the trailing
    % comma in the output-argument list is a parse error in MATLAB —
    % assign the single (cost) output directly instead.
    error_train(i) = linearRegCostFunction(X, y, theta, 0);
    error_val(i) = linearRegCostFunction(Xval, yval, theta, 0);
end
4449

4550

4651

0 commit comments

Comments (0)