diff --git a/Exercise1/exercise1.ipynb b/Exercise1/exercise1.ipynb
index 38a9faed..6f921a48 100755
--- a/Exercise1/exercise1.ipynb
+++ b/Exercise1/exercise1.ipynb
@@ -371,7 +371,7 @@
     "\n",
     "As you perform gradient descent to learn minimize the cost function $J(\\theta)$, it is helpful to monitor the convergence by computing the cost. In this section, you will implement a function to calculate $J(\\theta)$ so you can check the convergence of your gradient descent implementation. \n",
     "\n",
-    "Your next task is to complete the code for the function `computeCost` which computes $J(\\theta)$. As you are doing this, remember that the variables $X$ and $y$ are not scalar values. $X$ is a matrix whose rows represent the examples from the training set and $y$ is a vector whose each elemennt represent the value at a given row of $X$.\n",
+    "Your next task is to complete the code for the function `computeCost` which computes $J(\\theta)$. As you are doing this, remember that the variables $X$ and $y$ are not scalar values. $X$ is a matrix whose rows represent the examples from the training set and $y$ is a vector in which each element represents the value at a given row of $X$.\n",
     ""
    ]
   },
diff --git a/Exercise1/utils.py b/Exercise1/utils.py
index d0c909d5..b92e3cf5 100755
--- a/Exercise1/utils.py
+++ b/Exercise1/utils.py
@@ -19,7 +19,9 @@ def __init__(self):
                       'Computing Cost (for multiple variables)',
                       'Gradient Descent (for multiple variables)',
                       'Normal Equations']
-        super().__init__('linear-regression', part_names)
+        part_names_key = ['DCRbJ', 'BGa4S', 'b65eO', 'BbS8u', 'FBlE2', 'RZAZC', '7m5Eu']
+        assignment_key = 'UkTlA-FyRRKV5ooohuwU6A'
+        super().__init__('linear-regression', assignment_key, part_names, part_names_key)
 
     def __iter__(self):
         for part_id in range(1, 8):
diff --git a/Exercise2/utils.py b/Exercise2/utils.py
index 7c52dbe4..8e5e6a98 100755
--- a/Exercise2/utils.py
+++ b/Exercise2/utils.py
@@ -119,7 +119,9 @@ def __init__(self):
                       'Predict',
                       'Regularized Logistic Regression Cost',
                       'Regularized Logistic Regression Gradient']
-        super().__init__('logistic-regression', part_names)
+        part_names_key = ['sFxIn', 'yvXBE', 'HerlY', '9fxV6', 'OddeL', 'aUo3H']
+        assignment_key = 'JvOPouj-S-ys8KjYcPYqrg'
+        super().__init__('logistic-regression', assignment_key, part_names, part_names_key)
 
     def __iter__(self):
         for part_id in range(1, 7):
diff --git a/Exercise3/exercise3.ipynb b/Exercise3/exercise3.ipynb
index 33e782af..044d3634 100755
--- a/Exercise3/exercise3.ipynb
+++ b/Exercise3/exercise3.ipynb
@@ -373,7 +373,7 @@
     "$$\n",
     "\\begin{align*}\n",
     "& \\frac{\\partial J(\\theta)}{\\partial \\theta_0} = \\frac{1}{m} \\sum_{i=1}^m \\left( h_\\theta\\left( x^{(i)} \\right) - y^{(i)} \\right) x_j^{(i)} & \\text{for } j = 0 \\\\\n",
-    "& \\frac{\\partial J(\\theta)}{\\partial \\theta_0} = \\left( \\frac{1}{m} \\sum_{i=1}^m \\left( h_\\theta\\left( x^{(i)} \\right) - y^{(i)} \\right) x_j^{(i)} \\right) + \\frac{\\lambda}{m} \\theta_j & \\text{for } j \\ge 1\n",
+    "& \\frac{\\partial J(\\theta)}{\\partial \\theta_j} = \\left( \\frac{1}{m} \\sum_{i=1}^m \\left( h_\\theta\\left( x^{(i)} \\right) - y^{(i)} \\right) x_j^{(i)} \\right) + \\frac{\\lambda}{m} \\theta_j & \\text{for } j \\ge 1\n",
     "\\end{align*}\n",
     "$$\n",
     "\n",
diff --git a/Exercise3/utils.py b/Exercise3/utils.py
index 633a5636..4bf99715 100755
--- a/Exercise3/utils.py
+++ b/Exercise3/utils.py
@@ -79,8 +79,9 @@ def __init__(self):
                       'One-vs-All Classifier Training',
                       'One-vs-All Classifier Prediction',
                       'Neural Network Prediction Function']
-
-        super().__init__('multi-class-classification-and-neural-networks', part_names)
+        part_names_key = ['jzAIf', 'LjDnh', '3yxcY', 'yNspP']
+        assignment_key = '2KZRbGlpQnyzVI8Ki4uXjw'
+        super().__init__('multi-class-classification-and-neural-networks', assignment_key, part_names, part_names_key)
 
     def __iter__(self):
         for part_id in range(1, 5):
diff --git a/Exercise4/utils.py b/Exercise4/utils.py
index 6b7c3bdc..6d18b86f 100755
--- a/Exercise4/utils.py
+++ b/Exercise4/utils.py
@@ -193,7 +193,9 @@ def __init__(self):
                       'Sigmoid Gradient',
                       'Neural Network Gradient (Backpropagation)',
                       'Regularized Gradient']
-        super().__init__('neural-network-learning', part_names)
+        part_names_key = ['aAiP2', '8ajiz', 'rXsEO', 'TvZch', 'pfIYT']
+        assignment_key = 'xolSVXukR72JH37bfzo0pg'
+        super().__init__('neural-network-learning', assignment_key, part_names, part_names_key)
 
     def __iter__(self):
         for part_id in range(1, 6):
diff --git a/Exercise5/exercise5.ipynb b/Exercise5/exercise5.ipynb
index c0ca4f5d..5da0c5f5 100755
--- a/Exercise5/exercise5.ipynb
+++ b/Exercise5/exercise5.ipynb
@@ -396,7 +396,7 @@
     "        A vector of shape m. error_train[i] contains the training error for\n",
     "        i examples.\n",
     "    error_val : array_like\n",
-    "        A vecotr of shape m. error_val[i] contains the validation error for\n",
+    "        A vector of shape m. error_val[i] contains the validation error for\n",
     "        i training examples.\n",
     "    \n",
     "    Instructions\n",
@@ -687,9 +687,9 @@
     "\n",
     "### 3.2 Optional (ungraded) exercise: Adjusting the regularization parameter\n",
     "\n",
-    "In this section, you will get to observe how the regularization parameter affects the bias-variance of regularized polynomial regression. You should now modify the the lambda parameter and try $\\lambda = 1, 100$. For each of these values, the script should generate a polynomial fit to the data and also a learning curve.\n",
+    "In this section, you will get to observe how the regularization parameter affects the bias-variance of regularized polynomial regression. You should now modify the lambda parameter and try $\\lambda = 1, 100$. For each of these values, the script should generate a polynomial fit to the data and also a learning curve.\n",
     "\n",
-    "For $\\lambda = 1$, the generated plots should look like the the figure below. You should see a polynomial fit that follows the data trend well (left) and a learning curve (right) showing that both the cross validation and training error converge to a relatively low value. This shows the $\\lambda = 1$ regularized polynomial regression model does not have the high-bias or high-variance problems. In effect, it achieves a good trade-off between bias and variance.\n",
+    "For $\\lambda = 1$, the generated plots should look like the figure below. You should see a polynomial fit that follows the data trend well (left) and a learning curve (right) showing that both the cross validation and training error converge to a relatively low value. This shows the $\\lambda = 1$ regularized polynomial regression model does not have the high-bias or high-variance problems. In effect, it achieves a good trade-off between bias and variance.\n",
     "\n",
     "
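As a note on the Exercise1 hunk above: the `computeCost` function that the notebook cell describes evaluates the linear regression cost J(theta) = (1 / (2m)) * sum_i (h_theta(x^(i)) - y^(i))^2. Below is a minimal vectorized sketch, illustrative only (not the graded solution); it assumes `X` already contains the column of ones for the intercept term.

```python
import numpy as np

def computeCost(X, y, theta):
    """Linear regression cost J(theta) = (1 / (2*m)) * sum((X @ theta - y)**2).

    X is an (m, n+1) matrix of training examples (first column assumed to be
    ones), y is a length-m vector of targets, theta is a length-(n+1) vector.
    """
    m = y.size                      # number of training examples
    h = X.dot(theta)                # h_theta(x^(i)) for every example at once
    return (1.0 / (2 * m)) * np.sum(np.square(h - y))
```

As a quick sanity check, `computeCost(np.array([[1., 1.], [1., 2.]]), np.array([1., 2.]), np.zeros(2))` evaluates to 1.25.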
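Similarly, the corrected gradient formula in the Exercise3 hunk keeps theta_0 out of the regularization term. The sketch below shows one common way to vectorize that piecewise definition; it is illustrative only (the name `lr_gradient_reg` is just for this example) and assumes a sigmoid hypothesis and NumPy arrays.

```python
import numpy as np

def sigmoid(z):
    """Logistic function, applied element-wise."""
    return 1.0 / (1.0 + np.exp(-z))

def lr_gradient_reg(theta, X, y, lambda_):
    """Gradient of the regularized logistic regression cost.

    The intercept theta[0] is left unregularized, matching the piecewise
    formula for j = 0 and j >= 1.
    """
    m = y.size
    h = sigmoid(X.dot(theta))              # h_theta(x^(i)) for all examples
    grad = (1.0 / m) * X.T.dot(h - y)      # (1/m) * sum((h - y) * x_j), every j
    grad[1:] += (lambda_ / m) * theta[1:]  # add (lambda/m) * theta_j for j >= 1
    return grad
```

Computing the unregularized gradient for every j first and then adding (lambda/m) * theta_j only for j >= 1 avoids special-casing the intercept inside the sum.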