diff --git a/cli/README.md b/cli/README.md index c733b42..08f50ff 100644 --- a/cli/README.md +++ b/cli/README.md @@ -44,10 +44,10 @@ kubectl **Examples** ``` -$ kubectl create -f mypod.yaml -$ kubectl get pods -$ kubectl get pod mypod -$ kubectl delete pod mypod +kubectl create -f mypod.yaml +kubectl get pods +kubectl get pod mypod +kubectl delete pod mypod ``` --- @@ -75,12 +75,12 @@ This config is viewable without having to view the file directly. **Command** ``` -$ kubectl config view +kubectl config view ``` **Example** ```yaml -$ kubectl config view +kubectl config view apiVersion: v1 clusters: - cluster: @@ -130,13 +130,13 @@ Kubernetes documentation. 1. View the current contexts. ``` -$ kubectl config get-contexts +kubectl config get-contexts ``` 2. Create a new context called `minidev` within the `minikube` cluster with the `dev` namespace, as the `minikube` user. ``` -$ kubectl config set-context minidev --cluster=minikube --user=minikube --namespace=dev +kubectl config set-context minidev --cluster=minikube --user=minikube --namespace=dev ``` 3. View the newly added context. @@ -146,12 +146,12 @@ kubectl config get-contexts 4. Switch to the `minidev` context using `use-context`. ``` -$ kubectl config use-context minidev +kubectl config use-context minidev ``` 5. View the current active context. ``` -$ kubectl config current-context +kubectl config current-context ``` --- @@ -189,7 +189,7 @@ kubectl get -o **Examples** ``` -$ kubectl get namespaces +kubectl get namespaces NAME STATUS AGE default Active 4h kube-public Active 4h @@ -215,10 +215,10 @@ kubectl create -f **Examples** ``` -$ kubectl create namespace dev +kubectl create namespace dev namespace "dev" created $ -$ kubectl create -f manifests/mypod.yaml +kubectl create -f manifests/mypod.yaml pod "mypod" created ``` @@ -241,7 +241,7 @@ kubectl apply -f **Examples** ``` -$ kubectl apply -f manifests/mypod.yaml +kubectl apply -f manifests/mypod.yaml Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply pod "mypod" configured ``` @@ -256,7 +256,7 @@ will essentially be untracked. 
**Command** ``` -$ kubectl edit +kubectl edit ``` **Examples** @@ -277,7 +277,7 @@ kubectl delete **Examples** ``` -$ kubectl delete pod mypod +kubectl delete pod mypod pod "mypod" deleted ``` @@ -295,7 +295,7 @@ kubectl describe **Examples** ``` -$ kubectl describe pod mypod +kubectl describe pod mypod Name: mypod Namespace: dev Node: minikube/192.168.99.100 @@ -307,7 +307,7 @@ IP: 172.17.0.6 Containers: nginx: Container ID: docker://5a0c100de6599300b1565e73e64e8917f9a4f4b06325dc4890aad980d582cf04 - Image: nginx:stable-alpine + Image: twalter/openshift-nginx Image ID: docker-pullable://nginx@sha256:db5acc22920799fe387a903437eb89387607e5b3f63cf0f4472ac182d7bad644 Port: 80/TCP State: Running @@ -335,7 +335,7 @@ Events: ---- ------ ---- ---- ------- Normal Scheduled 5s default-scheduler Successfully assigned mypod to minikube Normal SuccessfulMountVolume 5s kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-s2xd7" - Normal Pulled 5s kubelet, minikube Container image "nginx:stable-alpine" already present on machine + Normal Pulled 5s kubelet, minikube Container image "twalter/openshift-nginx" already present on machine Normal Created 5s kubelet, minikube Created container Normal Started 5s kubelet, minikube Started container ``` @@ -354,7 +354,7 @@ kubectl logs -c **Examples** ``` -$ kubectl logs mypod +kubectl logs mypod 172.17.0.1 - - [10/Mar/2018:18:14:15 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.57.0" "-" 172.17.0.1 - - [10/Mar/2018:18:14:17 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.57.0" "-" ``` @@ -432,7 +432,7 @@ kubectl exec -it -c -- **Example** ``` -$ kubectl exec mypod -c nginx -- printenv +kubectl exec mypod -c nginx -- printenv PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=mypod KUBERNETES_SERVICE_PORT_HTTPS=443 @@ -446,7 +446,7 @@ KUBERNETES_SERVICE_PORT=443 NGINX_VERSION=1.12.2 HOME=/root $ -$ kubectl exec -i -t mypod -c nginx -- /bin/sh +kubectl exec -i -t mypod -c nginx -- /bin/sh / # / # cat /etc/alpine-release 3.5.2 @@ -461,23 +461,23 @@ $ kubectl exec -i -t mypod -c nginx -- /bin/sh 1) If not already created, create the Pod `mypod` from the manifest `manifests/mypod.yaml`. ``` -$ kubectl create -f manifests/mypod.yaml +kubectl create -f manifests/mypod.yaml ``` 2) Wait for the Pod to become ready (`running`). ``` -$ kubectl get pods --watch +kubectl get pods --watch ``` 3) Use `kubectl exec` to `cat` the file `/etc/os-release`. ``` -$ kubectl exec mypod -- cat /etc/os-release +kubectl exec mypod -- cat /etc/os-release ``` It should output the contents of the `os-release` file. 4) Now use `kubectl exec` and supply the `-i -t` flags to spawn a shell session within the container. ``` -$ kubectl exec -i -t mypod -- /bin/sh +kubectl exec -i -t mypod -- /bin/sh ``` If executed correctly, it should drop you into a new shell session within the nginx container. @@ -512,7 +512,7 @@ kubectl proxy --port= **Examples** ``` -$ kubectl proxy +kubectl proxy Starting to serve on 127.0.0.1:8001 @@ -580,12 +580,12 @@ $ minikube dashboard 1) Create the Pod `mypod` from the manifest `manifests/mypod.yaml`. (if not created previously) ``` -$ kubectl create -f manifests/mypod.yaml +kubectl create -f manifests/mypod.yaml ``` 2) Start the `kubectl proxy` with the defaults. ``` -$ kubectl proxy +kubectl proxy ``` 3) Access the Pod through the proxy. 
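The Pod is served through the API Server's proxy path. A sketch of the URL form for this exercise, assuming `mypod` was created in the `dev` namespace from the earlier context exercise and that the container listens on port `8081` (the `<name>:<port>` form selects the port to proxy to):

**URL**
```
http://127.0.0.1:8001/api/v1/namespaces/dev/pods/mypod:8081/proxy/
```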
@@ -621,7 +621,7 @@ external IP, or create firewall rules is an incredibly useful tool for troublesh ## Cleaning up **NOTE:** If you are proceeding with the next tutorials, simply delete the pod with: ``` -$ kubectl delete pod mypod +kubectl delete pod mypod ``` The namespace and context will be reused. diff --git a/cli/manifests/mypod.yaml b/cli/manifests/mypod.yaml index 64eaf05..d3801b4 100644 --- a/cli/manifests/mypod.yaml +++ b/cli/manifests/mypod.yaml @@ -5,6 +5,6 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 diff --git a/configuration/README.md b/configuration/README.md index 32d5442..c147554 100644 --- a/configuration/README.md +++ b/configuration/README.md @@ -54,12 +54,12 @@ data: **Command** ``` -$ kubectl create -f manifests/cm-manifest.yaml +kubectl create -f manifests/cm-manifest.yaml ``` View the created ConfigMap. ``` -$ kubectl get configmap manifest-example -o yaml +kubectl get configmap manifest-example -o yaml ``` #### From Literal @@ -67,36 +67,36 @@ $ kubectl get configmap manifest-example -o yaml Create ConfigMap `literal-example` using the `--from-literal` flag and `city=Ann Arbor` along with `state=Michigan` for the values. ``` -$ kubectl create cm literal-example --from-literal="city=Ann Arbor" --from-literal=state=Michigan +kubectl create cm literal-example --from-literal="city=Ann Arbor" --from-literal=state=Michigan ``` View the created ConfigMap. ``` -$ kubectl get cm literal-example -o yaml +kubectl get cm literal-example -o yaml ``` #### From Directory Create ConfigMap `dir-example` by using the `manifests/cm` directory as the source. ``` -$ kubectl create cm dir-example --from-file=manifests/cm/ +kubectl create cm dir-example --from-file=manifests/cm/ ``` View the created ConfigMap. ``` -$ kubectl get cm dir-example -o yaml +kubectl get cm dir-example -o yaml ``` #### From File Create ConfigMap `file-example` by using the `city` and `state` files in the `manifests/cm` directory. ``` -$ kubectl create cm file-example --from-file=manifests/cm/city --from-file=manifests/cm/state +kubectl create cm file-example --from-file=manifests/cm/city --from-file=manifests/cm/state ``` View the created ConfigMap. ``` -$ kubectl get cm file-example -o yaml +kubectl get cm file-example -o yaml ``` **Note:** When creating a ConfigMap from a file or directory the content will assume to be multiline as signified by @@ -146,7 +146,7 @@ spec: **Command** ``` -$ kubectl create -f manifests/cm-env-example.yaml +kubectl create -f manifests/cm-env-example.yaml ``` Note how the Environment Variable is injected using `valueFrom` and `configMapKeyRef`. This queries a specific key @@ -154,12 +154,12 @@ from the ConfigMap and injects it as an Environment Variable. 2) List the Pods. ``` -$ kubectl get pods +kubectl get pods ``` 3) Copy the pod name and view the output of the Job. ``` -$ kubectl logs cm-env-example- +kubectl logs cm-env-example- ``` It should echo the value from the `manifest-example` ConfigMap `city` key-value pair. @@ -192,17 +192,17 @@ spec: **Command** ``` -$ kubectl create -f manifests/cm-cmd-example.yaml +kubectl create -f manifests/cm-cmd-example.yaml ``` 5) List the Pods. ``` -$ kubectl get pods +kubectl get pods ``` 3) Copy the pod name of the `cm-cmd-example` job and view the output of the Pod. 
```
-$ kubectl logs cm-cmd-example-
+kubectl logs cm-cmd-example-
```
It should echo the string "Hello from " referencing the value from the `manifest-example` ConfigMap.
@@ -266,7 +266,7 @@ spec:

**Command**
```
-$ kubectl create -f manifests/cm-vol-example.yaml
+kubectl create -f manifests/cm-vol-example.yaml
```

Note the volumes and how they are being referenced. The volume `city` has an array of `items` that contains a sub-set
@@ -275,26 +275,26 @@ possible to override the name or path to the generated file by supplying an argu

2) View the contents of the `/myconfig` volume mount.
```
-$ kubectl exec cm-vol-example -- ls /myconfig
+kubectl exec cm-vol-example -- ls /myconfig
```
It will contain two files, matching the names of the keys stored in configMap `manifest-example`.

3) `cat` the contents of the files.
```
-$ kubectl exec cm-vol-example -- /bin/sh -c "cat /myconfig/*"
+kubectl exec cm-vol-example -- /bin/sh -c "cat /myconfig/*"
```
It will match the values stored in the configMap `manifest-example` concatenated together.

4) View the contents of the other Volume Mount `mycity`.
```
-$ kubectl exec cm-vol-example -- ls /mycity
+kubectl exec cm-vol-example -- ls /mycity
```
A file will be present that represents the single item being referenced in the `city` volume. This file bears the name
`thisismycity` as specified by the `path` variable.

5) `cat` the contents of the `thisismycity` file.
```
-$ kubectl exec cm-vol-example -- cat /mycity/thisismycity
+kubectl exec cm-vol-example -- cat /mycity/thisismycity
```
The contents should match the value of data[city].
@@ -358,7 +358,7 @@ data:

**Command**
```
-$ kubectl create -f manifests/secret-manifest.yaml
+kubectl create -f manifests/secret-manifest.yaml
```

Note the Secret has the additional attribute `type` when compared to a ConfigMap. The `Opaque` value simply means the
@@ -367,7 +367,7 @@ data is unstructured. Additionally, the content referenced in `data` itself is b

View the created Secret.
```
-$ kubectl get secret manifest-example -o yaml
+kubectl get secret manifest-example -o yaml
```

#### From Literal

Create Secret `literal-example` using the `--from-literal` flag and `username=example` along with `password=mypassword`
for the values.
```
-$ kubectl create secret generic literal-example --from-literal=username=example --from-literal=password=mypassword
+kubectl create secret generic literal-example --from-literal=username=example --from-literal=password=mypassword
```
**Note:** Unlike ConfigMaps you **must** also specify the type of Secret you are creating. There are 3 types:
* docker-registry - Credentials used to interact with a container registry.
@@ -385,31 +385,31 @@ $ kubectl create secret generic literal-example --from-literal=username=example

View the created Secret.
```
-$ kubectl get secret literal-example -o yaml
+kubectl get secret literal-example -o yaml
```

#### From Directory

Create Secret `dir-example` by using the `manifests/secret` directory as the source.
```
-$ kubectl create secret generic dir-example --from-file=manifests/secret/
+kubectl create secret generic dir-example --from-file=manifests/secret/
```

View the created Secret.
```
-$ kubectl get secret dir-example -o yaml
+kubectl get secret dir-example -o yaml
```

#### From File

Create Secret `file-example` by using the `username` and `password` files in the `manifests/secret` directory.
``` -$ kubectl create secret generic file-example --from-file=manifests/secret/username --from-file=manifests/secret/password +kubectl create secret generic file-example --from-file=manifests/secret/username --from-file=manifests/secret/password ``` View the created Secret. ``` -$ kubectl get secret file-example -o yaml +kubectl get secret file-example -o yaml ``` --- @@ -457,7 +457,7 @@ spec: **Command** ``` -$ kubectl create -f manifests/secret-env-example.yaml +kubectl create -f manifests/secret-env-example.yaml ``` Note how the Environment Variable is injected using `valueFrom` and `secretKeyRef`. This queries a specific key @@ -465,12 +465,12 @@ from the Secret and injects it as an Environment Variable. 2) List the Pods. ``` -$ kubectl get pods +kubectl get pods ``` 3) Copy the pod name and view the output of the Job. ``` -$ kubectl logs secret-env-example- +kubectl logs secret-env-example- ``` It should echo the value from the `manifest-example` Secret `username` key-value pair. @@ -503,17 +503,17 @@ spec: **Command** ``` -$ kubectl create -f manifests/secret-cmd-example.yaml +kubectl create -f manifests/secret-cmd-example.yaml ``` 5) List the Pods. ``` -$ kubectl get pods +kubectl get pods ``` 3) Copy the pod name of the `secret-cmd-example` job and view the output of the Pod. ``` -$ kubectl logs secret-cmd-example- +kubectl logs secret-cmd-example- ``` It should echo the string "Hello there !" referencing the value from the `manifest-example` Secret. @@ -577,7 +577,7 @@ spec: **Command** ``` -$ kubectl create -f manifests/secret-vol-example.yaml +kubectl create -f manifests/secret-vol-example.yaml ``` Note the volumes and how they are being referenced. The volume `password`, has an array of `items` that contains a @@ -586,26 +586,26 @@ possible to override the name or path to the generated file by supplying an argu 2) View the contents of the `/mysecret` volume mount. ``` -$ kubectl exec secret-vol-example -- ls /mysecret +kubectl exec secret-vol-example -- ls /mysecret ``` It will contain two files, matching the names of the keys stored in Secret `manifest-example`. 3) `cat` the contents of the files. ``` -$ kubectl exec secret-vol-example -- /bin/sh -c "cat /mysecret/*" +kubectl exec secret-vol-example -- /bin/sh -c "cat /mysecret/*" ``` It will match the values stored in the Secret `manifest-example` concatenated together. 4) View the contents of the other Volume Mount `mypass`. ``` -$ kubectl exec secret-vol-example -- ls /mypass +kubectl exec secret-vol-example -- ls /mypass ``` A file will be present that represents the single item being referenced in the `password` volume. This file bears the name `supersecretpass` as specified by the `path` variable. 5) `cat` contents of the `supersecretpass` file. ``` -$ kubectl exec secret-vol-example -- cat /mypass/supersecretpass +kubectl exec secret-vol-example -- cat /mypass/supersecretpass ``` The contents should match the value of data[password]. diff --git a/core/README.md b/core/README.md index da64a2b..ed63d5f 100644 --- a/core/README.md +++ b/core/README.md @@ -35,23 +35,23 @@ access. 1) List the current namespaces ``` -$ kubectl get namespaces +kubectl get namespaces ``` 2) Create the `dev` namespace ``` -$ kubectl create namespace dev +kubectl create namespace dev ``` 3) Create a new context called `minidev` within the `minikube` cluster as the `minikube` user, with the namespace set to `dev`. 
``` -$ kubectl config set-context minidev --cluster=minikube --user=minikube --namespace=dev +kubectl config set-context minidev --cluster=minikube --user=minikube --namespace=dev ``` 4) Switch to the newly created context. ``` -$ kubectl config use-context minidev +kubectl config use-context minidev ``` --- @@ -81,7 +81,7 @@ their exposed Services through the API Server proxy. --- -1) Create a simple Pod called `pod-example` using the `nginx:stable-alpine` image and expose port `80`. Use the +1) Create a simple Pod called `pod-example` using the `twalter/openshift-nginx` image and expose port `8081`. Use the manifest `manifests/pod-example.yaml` or the yaml below. **manifests/pod-example.yaml** @@ -93,26 +93,26 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/pod-example.yaml +kubectl create -f manifests/pod-example.yaml ``` 2) Use `kubectl` to describe the Pod and note the available information. ``` -$ kubectl describe pod pod-example +kubectl describe pod pod-example ``` 3) Use `kubectl proxy` to verify the web server running in the deployed Pod. **Command** ``` -$ kubectl proxy +kubectl proxy ``` **URL** ``` @@ -133,9 +133,9 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html @@ -157,7 +157,7 @@ spec: **Command** ``` -$ kubectl create -f manifests/pod-multi-container-example.yaml +kubectl create -f manifests/pod-multi-container-example.yaml ``` **Note:** `spec.containers` is an array allowing you to use multiple containers within a Pod. @@ -165,7 +165,7 @@ $ kubectl create -f manifests/pod-multi-container-example.yaml **Command** ``` -$ kubectl proxy +kubectl proxy ``` **URL** ``` @@ -204,12 +204,12 @@ set-based selectors. 1) Label the Pod `pod-example` with `app=nginx` and `environment=dev` via `kubectl`. ``` -$ kubectl label pod pod-example app=nginx environment=dev +kubectl label pod pod-example app=nginx environment=dev ``` 2) View the labels with `kubectl` by passing the `--show-labels` flag ``` -$ kubectl get pods --show-labels +kubectl get pods --show-labels ``` 3) Update the multi-container example manifest created previously with the labels `app=nginx` and `environment=prod` @@ -227,9 +227,9 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html @@ -251,31 +251,31 @@ spec: **Command** ``` -$ kubectl apply -f manifests/pod-multi-container-example.yaml +kubectl apply -f manifests/pod-multi-container-example.yaml ``` 4) View the added labels with `kubectl` by passing the `--show-labels` flag once again. ``` -$ kubectl get pods --show-labels +kubectl get pods --show-labels ``` 5) With the objects now labeled, use an [equality based selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement) targeting the `prod` environment. ``` -$ kubectl get pods --selector environment=prod +kubectl get pods --selector environment=prod ``` 6) Do the same targeting the `nginx` app with the short version of the selector flag (`-l`). 
``` -$ kubectl get pods -l app=nginx +kubectl get pods -l app=nginx ``` 7) Use a [set-based selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement) to view all pods where the `app` label is `nginx` and filter out any that are in the `prod` environment. ``` -$ kubectl get pods -l 'app in (nginx), environment notin (prod)' +kubectl get pods -l 'app in (nginx), environment notin (prod)' ``` --- @@ -304,7 +304,7 @@ resource (unlike Pods) that is given a static cluster-unique IP and provide simp --- -1) Create `ClusterIP` service `clusterip` that targets Pods labeled with `app=nginx` forwarding port `80` using +1) Create `ClusterIP` service `clusterip` that targets Pods labeled with `app=nginx` forwarding port `8081` using either the yaml below, or the manifest `manifests/service-clusterip.yaml`. **manifests/service-clusterip.yaml** @@ -318,25 +318,25 @@ spec: app: nginx ports: - protocol: TCP - port: 80 - targetPort: 80 + port: 8081 + targetPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/service-clusterip.yaml +kubectl create -f manifests/service-clusterip.yaml ``` 2) Describe the newly created service. Note the `IP` and the `Endpoints` fields. ``` -$ kubectl describe service clusterip +kubectl describe service clusterip ``` 3) View the service through `kube proxy` and refresh several times. It should serve up pages from both pods. **Command** ``` -$ kubectl proxy +kubectl proxy ``` **URL** ``` @@ -346,7 +346,7 @@ http://127.0.0.1:8001/api/v1/namespaces/dev/services/clusterip/proxy/ 4) Lastly, verify that the generated DNS record has been created for the Service by using nslookup within the `example-pod` Pod that was provisioned in the [Creating Pods](#exercise-creating-pods) exercise. ``` -$ kubectl exec pod-example -- nslookup clusterip.dev.svc.cluster.local +kubectl exec pod-example -- nslookup clusterip.dev.svc.cluster.local ``` It should return a valid response with the IP matching what was noted earlier when describing the Service. @@ -365,7 +365,7 @@ which exposed Pod Services are consumed **within** a Kubernetes Cluster. --- 1) Create a `NodePort` Service called `nodeport` that targets Pods with the labels `app=nginx` and `environment=dev` -forwarding port `80` in cluster, and port `32410` on the node itself. Use either the yaml below, or the manifest +forwarding port `8081` in cluster, and a random port on the node itself. Use either the yaml below, or the manifest `manifests/service-nodeport.yaml`. **manifests/service-nodeport.yaml** @@ -380,21 +380,20 @@ spec: app: nginx environment: prod ports: - - nodePort: 32410 - protocol: TCP - port: 80 - targetPort: 80 + - protocol: TCP + port: 8081 + targetPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/service-nodeport.yaml +kubectl create -f manifests/service-nodeport.yaml ``` 2) Describe the newly created Service Endpoint. Note the Service still has an internal cluster `IP`, and now additionally has a `NodePort`. ``` -$ kubectl describe service nodeport +kubectl describe service nodeport ``` 3) Use the `minikube service` command to open the newly exposed `nodeport` Service in a browser. @@ -405,14 +404,14 @@ $ minikube service -n dev nodeport 4) Lastly, verify that the generated DNS record has been created for the Service by using nslookup within the `example-pod` Pod. 
``` -$ kubectl exec pod-example -- nslookup nodeport.dev.svc.cluster.local +kubectl exec pod-example -- nslookup nodeport.dev.svc.cluster.local ``` It should return a valid response with the IP matching what was noted earlier when describing the Service. --- **Summary:** The `NodePort` Services extend the `ClusterIP` Service and additionally expose a port that is either -statically defined, as above (port 32410) or dynamically taken from a range between 30000-32767. This port is then +statically defined, or dynamically taken from a range between 30000-32767. This port is then exposed on every node within the cluster and proxies to the created Service. --- @@ -432,11 +431,11 @@ that can do this, but for this example the Google [metalLB](https://github.com/g IP range. Edit the manifest `manifests/metalLB.yaml` and change the cidr range on line 20 (`192.168.99.224/28`) to fit your requirements. Otherwise go ahead and deploy it. ``` -$ kubectl create -f manifests/metalLB.yaml +kubectl create -f manifests/metalLB.yaml ``` 1) Create a `LoadBalancer` Service called `loadbalancer` that targets pods with the labels `app=nginx` and -`environment=prod` forwarding as port `80`. Use either the yaml below, or the manifest +`environment=prod` forwarding as port `8081`. Use either the yaml below, or the manifest `manifests/service--loadbalancer.yaml`. **manifests/service-loadbalancer.yaml** @@ -452,19 +451,19 @@ spec: environment: prod ports: - protocol: TCP - port: 80 - targetPort: 80 + port: 8081 + targetPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/service-loadbalancer.yaml +kubectl create -f manifests/service-loadbalancer.yaml ``` 2) Describe the Service `loadbalancer`, and note the Service retains the aspects of both the `ClusterIP` and `NodePort` Service types in addition to having a new attribute `LoadBalancer Ingress`. ``` -$ kubectl describe service loadbalancer +kubectl describe service loadbalancer ``` 3) Open a browser and visit the IP noted in the `Loadbalancer Ingress` field. It should directly map to the exposed @@ -479,7 +478,7 @@ $ minikube service -n dev loadbalancer 5) Finally, verify that the generated DNS record has been created for the Service by using nslookup within the `example-pod` Pod. ``` -$ kubectl exec pod-example -- nslookup loadbalancer.dev.svc.cluster.local +kubectl exec pod-example -- nslookup loadbalancer.dev.svc.cluster.local ``` It should return a valid response with the IP matching what was noted earlier when describing the Service. @@ -499,19 +498,19 @@ turn direct traffic to the desired Pods. 1) Create an `ExternalName` service called `externalname` that points to `google.com` ``` -$ kubectl create service externalname externalname --external-name=google.com +kubectl create service externalname externalname --external-name=google.com ``` 2) Describe the `externalname` Service. Note that it does **NOT** have an internal IP or other _normal_ service attributes. ``` -$ kubectl describe service externalname +kubectl describe service externalname ``` 3) Lastly, look at the generated DNS record has been created for the Service by using nslookup within the `example-pod` Pod. It should return the IP of `google.com`. 
``` -$ kubectl exec pod-example -- nslookup externalname.dev.svc.cluster.local +kubectl exec pod-example -- nslookup externalname.dev.svc.cluster.local ``` --- diff --git a/core/manifests/pod-example.yaml b/core/manifests/pod-example.yaml index bb0afa9..c4f2f79 100644 --- a/core/manifests/pod-example.yaml +++ b/core/manifests/pod-example.yaml @@ -5,6 +5,6 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 diff --git a/core/manifests/pod-multi-container-example.yaml b/core/manifests/pod-multi-container-example.yaml index 0ec8ea9..636a1c7 100644 --- a/core/manifests/pod-multi-container-example.yaml +++ b/core/manifests/pod-multi-container-example.yaml @@ -5,9 +5,9 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html diff --git a/core/manifests/service-clusterip.yaml b/core/manifests/service-clusterip.yaml index 7d556b8..d349383 100644 --- a/core/manifests/service-clusterip.yaml +++ b/core/manifests/service-clusterip.yaml @@ -7,5 +7,5 @@ spec: app: nginx ports: - protocol: TCP - port: 80 - targetPort: 80 + port: 8081 + targetPort: 8081 diff --git a/core/manifests/service-loadbalancer.yaml b/core/manifests/service-loadbalancer.yaml index 9ff148b..778ccfb 100644 --- a/core/manifests/service-loadbalancer.yaml +++ b/core/manifests/service-loadbalancer.yaml @@ -9,5 +9,5 @@ spec: environment: prod ports: - protocol: TCP - port: 80 - targetPort: 80 + port: 8081 + targetPort: 8081 diff --git a/core/manifests/service-nodeport.yaml b/core/manifests/service-nodeport.yaml index 8784821..e67cb99 100644 --- a/core/manifests/service-nodeport.yaml +++ b/core/manifests/service-nodeport.yaml @@ -8,7 +8,6 @@ spec: app: nginx environment: prod ports: - - nodePort: 32410 - protocol: TCP - port: 80 - targetPort: 80 + - protocol: TCP + port: 8081 + targetPort: 8081 diff --git a/examples/jupyterhub/README.md b/examples/jupyterhub/README.md index 63a16e4..4ad938f 100644 --- a/examples/jupyterhub/README.md +++ b/examples/jupyterhub/README.md @@ -24,7 +24,7 @@ automatically. Together they make for a fairly seamless Jupyter experience. Create the service accounts and rbac policies with the below command. ``` -$ kubectl create -f manifests/rbac.yaml +kubectl create -f manifests/rbac.yaml ``` **NOTE:** RBAC is out of scope for the introductory tutorials, however they're required for both the Hub and Proxy to @@ -37,7 +37,7 @@ be able to communicate with the Kubernetes API. If you are interested at explori 1. Create the 3 ConfigMaps: ``` -$ kubectl create \ +kubectl create \ -f manifests/cm-hub-config.yaml \ -f manifests/cm-ingress.yaml \ -f manifests/cm-nginx.yaml @@ -50,17 +50,17 @@ $ kubectl create \ 2. Create the [secret](manifests/secret-hub.yaml) used by the Proxy to authenticate to the Hub. ``` -$ kubectl create -f manifests/secret-hub.yaml +kubectl create -f manifests/secret-hub.yaml ``` 3. Create the [PVC](manifests/pvc-hub.yaml) used by the Hub to store it's internal database. ``` -$ kubectl create -f manifests/pvc-hub.yaml +kubectl create -f manifests/pvc-hub.yaml ``` 4. Now create the 4 services used by both the Hub and Proxy: ``` -$ kubectl create \ +kubectl create \ -f manifests/svc-hub.yaml \ -f manifests/svc-proxy-api.yaml \ -f manifests/svc-proxy-http.yaml \ @@ -78,7 +78,7 @@ $ kubectl create \ 5. 
With everything else provisioned, the two deployments for the Hub Server and Proxy may now be created.
```
-$ kubectl create \
+kubectl create \
    -f manifests/deploy-hub.yaml \
    -f manifests/deploy-proxy.yaml
```
@@ -88,7 +88,7 @@ $ kubectl create \

6. Wait for the Pods to be up and running:
```
-$ kubectl get pods --watch
+kubectl get pods --watch
```

**NOTE:** It is common for the Hub Server to restart at least once.
@@ -101,7 +101,7 @@ $ minikube service proxy-public

8. Watch the Pods once again.
```
-$ kubectl get pods --watch
+kubectl get pods --watch
```
There will be a Pod spinning up with the name `jupyter-admin`. This is the dynamically provisioned notebook server
being spun up.
@@ -113,9 +113,9 @@ With that you should have a fully functional instance of the JupyterHub provisio

## Clean Up
```
-$ kubectl delete -f manifests/
-$ kubectl delete pod jupyter-admin
-$ kubectl delete pvc claim-admin
+kubectl delete -f manifests/
+kubectl delete pod jupyter-admin
+kubectl delete pvc claim-admin
```

[hub]: https://jupyterhub.readthedocs.io/en/latest/
diff --git a/examples/jupyterhub/manifests/deploy-proxy.yaml b/examples/jupyterhub/manifests/deploy-proxy.yaml
index 85d3c8e..fbba064 100644
--- a/examples/jupyterhub/manifests/deploy-proxy.yaml
+++ b/examples/jupyterhub/manifests/deploy-proxy.yaml
@@ -38,7 +38,7 @@ spec:
            fieldPath: metadata.namespace
        ports:
        - name: http
-        containerPort: 80
+        containerPort: 8081
          protocol: TCP
      - name: chp
        image: jupyterhub/configurable-http-proxy:3.0.0
diff --git a/examples/jupyterhub/manifests/svc-proxy-public.yaml b/examples/jupyterhub/manifests/svc-proxy-public.yaml
index c081780..afeb0a4 100644
--- a/examples/jupyterhub/manifests/svc-proxy-public.yaml
+++ b/examples/jupyterhub/manifests/svc-proxy-public.yaml
@@ -10,6 +10,6 @@ spec:
  ports:
  - name: http
    nodePort: 32020
-    port: 80
+    port: 8081
    protocol: TCP
-    targetPort: 80
\ No newline at end of file
+    targetPort: 8081
\ No newline at end of file
diff --git a/examples/wordpress/README.md b/examples/wordpress/README.md
index fd44049..17d7751 100644
--- a/examples/wordpress/README.md
+++ b/examples/wordpress/README.md
@@ -16,7 +16,7 @@ production deployment. For a more production ready deployment, see the [WordPres

1. Create the Secret used for the MySQL root account:
```
-$ kubectl create -f manifests/secret-mysql.yaml
+kubectl create -f manifests/secret-mysql.yaml
```

* **[manifests/secret-mysql.yaml](manifests/secret-mysql.yaml)** - Contains a base64 encoded string to serve as the
@@ -25,7 +25,7 @@ $ kubectl create -f manifests/secret-mysql.yaml

2. Create the MySQL [StatefulSet](manifests/sts-mysql.yaml) and its associated [service](manifests/svc-mysql.yaml).
```
-$ kubectl create \
+kubectl create \
    -f manifests/sts-mysql.yaml \
    -f manifests/svc-mysql.yaml
```
@@ -39,20 +39,20 @@ dynamically provision a volume.

3. Wait for the Pod to be up and running:
```
-$ kubectl get pods --watch
+kubectl get pods --watch
```

3.
With MySQL up and running, WordPress can now be provisioned. Start by Creating the [PVC](manifests/pvc-wordpress.yaml) used to store WordPress's internal data. ``` -$ kubectl create -f manifests/pvc-wordpress.yaml +kubectl create -f manifests/pvc-wordpress.yaml ``` * **[manifests/pvc-wordpress.yaml](manifests/pvc-wordpress.yaml)** - The Persistent Volume Claim used for the WordPress pod's own internal storage. 4. Now create the WordPress deployment and its associated Service. ``` -$ kubectl create \ +kubectl create \ -f manifests/dep-wordpress.yaml \ -f manifests/svc-wordpress.yaml ``` @@ -63,7 +63,7 @@ $ kubectl create \ 5. Wait for the Pods to be up and running: ``` -$ kubectl get pods --watch +kubectl get pods --watch ``` 6. With both MySQL and WordPress up and running, use the `minikube service` command to access the WordPress deployment. @@ -79,8 +79,8 @@ give it a go! ## Clean Up ``` -$ kubectl delete -f manifests/ -$ kubectl delete pvc mysql-data-mysql-0 +kubectl delete -f manifests/ +kubectl delete pvc mysql-data-mysql-0 ``` [wordpress]: https://wordpress.org/ diff --git a/examples/wordpress/manifests/dep-wordpress.yaml b/examples/wordpress/manifests/dep-wordpress.yaml index 019804d..cfc657f 100644 --- a/examples/wordpress/manifests/dep-wordpress.yaml +++ b/examples/wordpress/manifests/dep-wordpress.yaml @@ -31,7 +31,7 @@ spec: name: mysql key: password ports: - - containerPort: 80 + - containerPort: 8081 name: wordpress volumeMounts: - name: wordpress diff --git a/examples/wordpress/manifests/svc-wordpress.yaml b/examples/wordpress/manifests/svc-wordpress.yaml index c803afe..16056d5 100644 --- a/examples/wordpress/manifests/svc-wordpress.yaml +++ b/examples/wordpress/manifests/svc-wordpress.yaml @@ -7,7 +7,7 @@ metadata: component: wordpress spec: ports: - - port: 80 + - port: 8081 selector: app: wordpress component: wordpress diff --git a/storage/README.md b/storage/README.md index bb00c18..ded3115 100644 --- a/storage/README.md +++ b/storage/README.md @@ -34,7 +34,7 @@ is used within a Kubernetes cluster. For these exercises, it should be disabled. ``` $ minikube addons disable default-storageclass -$ kubectl delete sc standard +kubectl delete sc standard ``` **Attention Windows Users:** There is a [known issue with minikube and enabling/disabling @@ -42,8 +42,8 @@ addons](https://github.com/kubernetes/minikube/issues/2281). If you encounter an the following: ``` $ minikube ssh 'sudo mv /etc/kubernetes/manifests/addon-manager.yaml /etc/kubernetes/addon-manager.yaml' -$ kubectl delete pod storage-provisioner -n kube-system -$ kubectl delete sc standard +kubectl delete pod storage-provisioner -n kube-system +kubectl delete sc standard ``` @@ -84,9 +84,9 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html @@ -109,14 +109,14 @@ spec: **Command** ``` -$ kubectl create -f manifests/volume-example.yaml +kubectl create -f manifests/volume-example.yaml ``` Note the relationship between `volumes` in the Pod spec, and the `volumeMounts` directive in each container. 2) Exec into `content` container within the `volume-example` Pod, and `cat` the `html/index.html` file. ``` -$ kubectl exec volume-example -c content -- /bin/sh -c "cat /html/index.html" +kubectl exec volume-example -c content -- /bin/sh -c "cat /html/index.html" ``` You should see a list of date time-stamps. 
This is generated by the script being used as the entrypoint (`args`) of the content container. @@ -124,13 +124,13 @@ content container. 3) Now do the same within the `nginx` container, using `cat` to see the content of `/usr/share/nginx/html/index.html` example. ``` -$ kubectl exec volume-example -c nginx -- /bin/sh -c "cat /usr/share/nginx/html/index.html" +kubectl exec volume-example -c nginx -- /bin/sh -c "cat /usr/share/nginx/html/index.html" ``` You should see the same file. 4) Now try to append "nginx" to `index.html` from the `nginx` container. ``` -$ kubectl exec volume-example -c nginx -- /bin/sh -c "echo nginx >> /usr/share/nginx/html/index.html" +kubectl exec volume-example -c nginx -- /bin/sh -c "echo nginx >> /usr/share/nginx/html/index.html" ``` It should error out and complain about the file being read only. The `nginx` container has no reason to write to the file, and mounts the same Volume as read-only. Writing to the file is handled by the `content` container. @@ -204,12 +204,12 @@ spec: **Command** ``` -$ kubectl create -f manifests/pv-sc-example.yaml +kubectl create -f manifests/pv-sc-example.yaml ``` 2) Once created, list the available Persistent Volumes. ``` -$ kubectl get pv +kubectl get pv ``` You should see the single PV `pv-sc-example` flagged with the status `Available`. Meaning no claim has been issued that targets it. @@ -235,14 +235,14 @@ spec: **Command** ``` -$ kubectl create -f manifests/pvc-selector-example.yaml +kubectl create -f manifests/pvc-selector-example.yaml ``` Note that the selector targets `type=hostpath`. 4) Then describe the newly created PVC ``` -$ kubectl describe pvc pvc-selector-example +kubectl describe pvc pvc-selector-example ``` The pvc `pvc-selector-example` should be in a `Pending` state with the Error Event `FailedBinding` and `no Persistent Volumes available for this claim and no storage class is set`. If a PV is given a `storageClassName`, @@ -270,12 +270,12 @@ spec: **Command** ``` -$ kubectl create -f manifests/pv-selector-example.yaml +kubectl create -f manifests/pv-selector-example.yaml ``` 6) Give it a few moments and then look at the Persistent Volumes once again. ``` -$ kubectl get pv +kubectl get pv ``` The PV `pv-selector-example` should now be in a `Bound` state, meaning that a PVC has been mapped or _"bound"_ to it. Once bound, **NO** other PVCs may make a claim against the PV. @@ -299,26 +299,26 @@ spec: **Command** ``` -$ kubectl create -f manifests/pvc-sc-example.yaml +kubectl create -f manifests/pvc-sc-example.yaml ``` Note that this PVC has a `storageClassName` reference and no selector. 8) Give it a few seconds and then view the current PVCs. ``` -$ kubectl get pvc +kubectl get pvc ``` The `pvc-sc-example` should be bound to the `pv-sc-example` Volume. It consumed the PV with the corresponding `storageClassName`. 9) Delete both PVCs. ``` -$ kubectl delete pvc pvc-sc-example pvc-selector-example +kubectl delete pvc pvc-sc-example pvc-selector-example ``` 10) Then list the PVs once again. ``` -$ kubectl get pv +kubectl get pv ```` The `pv-sc-example` will not be listed. This is because it was created with a `persistentVolumeReclaimPolicy` of `Delete` meaning that as soon as the PVC was deleted, the PV itself was deleted. @@ -331,7 +331,7 @@ accidentally deleted giving an administrator time to do something with the data 11) Delete the PV `pv-selector-example`. 
``` -$ kubectl delete pv pv-selector-example +kubectl delete pv pv-selector-example ``` --- @@ -389,7 +389,7 @@ spec: **Command** ``` -$ kubectl create -f manifests/html-vol.yaml +kubectl create -f manifests/html-vol.yaml ``` 2) Create Deployment `writer` from the manifest `manifests/writer.yaml` or use the yaml below. It is similar to the @@ -432,7 +432,7 @@ spec: **Command** ``` -$ kubectl create -f manifests/writer.yaml +kubectl create -f manifests/writer.yaml ``` Note that the `claimName` references the previously created PVC defined in the `html-vol` manifest. @@ -457,9 +457,9 @@ spec: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html @@ -480,19 +480,19 @@ spec: app: reader ports: - protocol: TCP - port: 80 - targetPort: 80 + port: 8081 + targetPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/reader.yaml +kubectl create -f manifests/reader.yaml ``` 3) With the `reader` Deployment and Service created, use `kubectl proxy` to view the `reader` Service. ``` -$ kubectl proxy +kubectl proxy ``` **URL** ``` @@ -503,7 +503,7 @@ created with the access mode `ReadWriteMany`. 4) Now try to append "nginx" to `index.html` from one of the `reader` Pods. ``` -$ kubectl exec reader-- -- /bin/sh -c "echo nginx >> /usr/share/nginx/html/index.html" +kubectl exec reader-- -- /bin/sh -c "echo nginx >> /usr/share/nginx/html/index.html" ``` The `reader` Pods have mounted the Volume as read only. Just as it did with exercise 1, The command should error out with a message complaining about not being able to modify a read-only filesystem. @@ -545,19 +545,19 @@ via a Storage Class. 1) Re-enable the minikube default-storageclass, and wait for it to become available ``` $ minikube addons enable default-storageclass -$ kubectl get sc --watch +kubectl get sc --watch ``` or if you had to perform the windows workaround, execute this: ``` $ minikube ssh 'sudo mv /etc/kubernetes/addon-manager.yaml /etc/kubernetes/manifests/addon-manager.yaml' -$ kubectl get sc --watch +kubectl get sc --watch ``` You should see Storage Class `standard` become available after a few moments. 2) Describe the new Storage Class ``` -$ kubectl describe sc standard +kubectl describe sc standard ``` Note the fields `IsDefaultClass`, `Provisioner`, and `ReclaimPolicy`. The `Provisioner` attribute references the _"driver"_ for the Storage Class. Minikube comes with it's own driver `k8s.io/minikube-hostpath` that simply mounts @@ -582,19 +582,19 @@ spec: **Command** ``` -$ kubectl create -f manifests/pvc-standard.yaml +kubectl create -f manifests/pvc-standard.yaml ``` 4) Describe the PVC `pvc-standard` ``` -$ kubectl describe pvc pvc-standard +kubectl describe pvc pvc-standard ``` The `Events` lists the actions that occurred when the PVC was created. The external provisioner `standard` provisions a Volume for the claim `default/pvc-standard` and is assigned the name `pvc-`. 5) List the PVs. ``` -$ kubectl get pv +kubectl get pv ``` The PV `pvc-` will be the **exact** size of the associated PVC. @@ -620,12 +620,12 @@ spec: **Command** ``` -$ kubectl create -f manifests/pvc-selector-example.yaml +kubectl create -f manifests/pvc-selector-example.yaml ``` 7) List the PVCs. ``` -$ kubectl get pvc +kubectl get pvc ``` The PVC `pvc-selector-example` was bound to a PV automatically, even without a valid selector target. 
The `standard` Storage Class was configured as the default, meaning that **any** PVCs that do not have a valid target will default to @@ -633,12 +633,12 @@ using the `standard` Storage Class. 8) Delete both PVCs. ``` -$ kubectl delete pvc pvc-standard pvc-selector-example +kubectl delete pvc pvc-standard pvc-selector-example ``` 9) List the PVs once again. ``` -$ kubectl get pv +kubectl get pv ``` The PVs were automatically reclaimed following the `ReclaimPolicy` that was set by the Storage Class. diff --git a/storage/manifests/reader.yaml b/storage/manifests/reader.yaml index 1c91533..1d966d4 100644 --- a/storage/manifests/reader.yaml +++ b/storage/manifests/reader.yaml @@ -14,9 +14,9 @@ spec: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html @@ -37,5 +37,5 @@ spec: app: reader ports: - protocol: TCP - port: 80 - targetPort: 80 + port: 8081 + targetPort: 8081 diff --git a/storage/manifests/volume-example.yaml b/storage/manifests/volume-example.yaml index 6c7210a..a21d5fd 100644 --- a/storage/manifests/volume-example.yaml +++ b/storage/manifests/volume-example.yaml @@ -5,9 +5,9 @@ metadata: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 volumeMounts: - name: html mountPath: /usr/share/nginx/html diff --git a/workloads/README.md b/workloads/README.md index be4b5ae..4d27d30 100644 --- a/workloads/README.md +++ b/workloads/README.md @@ -45,7 +45,7 @@ the Pod template, and how they are targeted with selectors. --- -1) Begin by creating a ReplicaSet called `rs-example` with `3` `replicas`, using the `nginx:stable-alpine` image and +1) Begin by creating a ReplicaSet called `rs-example` with `3` `replicas`, using the `twalter/openshift-nginx` image and configure the labels and selectors to target `app=nginx` and `env=prod`. The yaml block below or the manifest `manifests/rs-example.yaml` may be used. @@ -69,43 +69,43 @@ spec: spec: containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/rs-example.yaml +kubectl create -f manifests/rs-example.yaml ``` 2) Watch as the newly created ReplicaSet provisions the Pods based off the Pod Template. ``` -$ kubectl get pods --watch --show-labels +kubectl get pods --watch --show-labels ``` Note that the newly provisioned Pods are given a name based off the ReplicaSet name appended with a 5 character random string. These Pods are labeled with the labels as specified in the manifest. 3) Scale ReplicaSet `rs-example` up to `5` replicas with the below command. ``` -$ kubectl scale replicaset rs-example --replicas=5 +kubectl scale replicaset rs-example --replicas=5 ``` **Tip:** `replicaset` can be substituted with `rs` when using `kubectl`. 4) Describe `rs-example` and take note of the `Replicas` and `Pod Status` field in addition to the `Events`. ``` -$ kubectl describe rs rs-example +kubectl describe rs rs-example ``` 5) Now, using the `scale` command bring the replicas back down to `3`. ``` -$ kubectl scale rs rs-example --replicas=3 +kubectl scale rs rs-example --replicas=3 ``` 6) Watch as the ReplicaSet Controller terminates 2 of the Pods to bring the cluster back into it's desired state of 3 replicas. 
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```

7) Once `rs-example` is back down to 3 Pods, create an independent Pod manually with the same labels as the one
targeted by `rs-example` from the manifest `manifests/pod-rs-example.yaml`.

**manifests/pod-rs-example.yaml**
```
@@ -123,25 +123,25 @@ metadata:
 spec:
   containers:
   - name: nginx
-    image: nginx:stable-alpine
+    image: twalter/openshift-nginx
     ports:
-    - containerPort: 80
+    - containerPort: 8081
```

**Command**
```
-$ kubectl create -f manifests/pod-rs-example.yaml
+kubectl create -f manifests/pod-rs-example.yaml
```

8) Immediately watch the Pods.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
Note that the Pod is created and immediately terminated.

9) Describe `rs-example` and look at the `events`.
```
-$ kubectl describe rs rs-example
+kubectl describe rs rs-example
```
There will be an entry with `Deleted pod: pod-example`. This is because a ReplicaSet targets **ALL** Pods matching
the labels supplied in the selector.
@@ -207,31 +207,31 @@ spec:
     spec:
       containers:
       - name: nginx
-        image: nginx:stable-alpine
+        image: twalter/openshift-nginx
         ports:
-        - containerPort: 80
+        - containerPort: 8081
```

**Command**
```
-$ kubectl create -f manifests/deploy-example.yaml --record
+kubectl create -f manifests/deploy-example.yaml --record
```

2) Check the status of the Deployment.
```
-$ kubectl get deployments
+kubectl get deployments
```

3) Once the Deployment is ready, view the current ReplicaSets and be sure to show the labels.
```
-$ kubectl get rs --show-labels
+kubectl get rs --show-labels
```
Note the name and `pod-template-hash` label of the newly created ReplicaSet. The created ReplicaSet's name will
include the `pod-template-hash`.

4) Describe the generated ReplicaSet.
```
-$ kubectl describe rs deploy-example-
+kubectl describe rs deploy-example-
```
Look at both the `Labels` and the `Selectors` fields. The `pod-template-hash` value has automatically been added to
both the Labels and Selector of the ReplicaSet. Then take note of the `Controlled By` field. This will reference the
@@ -239,13 +239,13 @@ direct parent object, and in this case the original `deploy-example` Deployment.

5) Now, get the Pods and pass the `--show-labels` flag.
```
-$ kubectl get pods --show-labels
+kubectl get pods --show-labels
```
Just as with the ReplicaSet, the Pods' names and labels include the `pod-template-hash`.

6) Describe one of the Pods.
```
-$ kubectl describe pod deploy-example-
+kubectl describe pod deploy-example-
```
Look at the `Controlled By` field. It will contain a reference to the parent ReplicaSet, but not the parent
Deployment.
@@ -255,15 +255,15 @@ Now that the relationship from Deployment to ReplicaSet to Pod is understood. It

7) Update the `deploy-example` manifest and add a few additional labels to the Pod template. Once done, apply the change
with the `--record` flag.
```
-$ kubectl apply -f manifests/deploy-example.yaml --record
+kubectl apply -f manifests/deploy-example.yaml --record
  < or >
-$ kubectl edit deploy deploy-example --record
+kubectl edit deploy deploy-example --record
```
**Tip:** `deploy` can be substituted for `deployment` when using `kubectl`.

8) Immediately watch the Pods.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
The old version of the Pods will be phased out one at a time and instances of the new version will take their place.
The way in which this is controlled is through the `strategy` stanza.
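A minimal sketch of what such a `strategy` stanza might look like under the Deployment `spec`, assuming the default `RollingUpdate` type (the values are illustrative and not taken from `deploy-example.yaml`):
```yaml
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1 # no more than 1 Pod below the desired replica count during a rollout
      maxSurge: 1       # no more than 1 extra Pod above the desired replica count
```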
For specific documentation this feature, see @@ -271,27 +271,27 @@ the [Deployment Strategy Documentation](https://kubernetes.io/docs/concepts/work 9) Now view the ReplicaSets. ``` -$ kubectl get rs --show-labels +kubectl get rs --show-labels ``` There will now be two ReplicaSets, with the previous version of the Deployment being scaled down to 0. 10) Now, scale the Deployment up as you would a ReplicaSet, and set the `replicas=5`. ``` -$ kubectl scale deploy deploy-example --replicas=5 +kubectl scale deploy deploy-example --replicas=5 ``` 11) List the ReplicaSets. ``` -$ kubectl get rs --show-labels +kubectl get rs --show-labels ``` Note that there is **NO** new ReplicaSet generated. Scaling actions do **NOT** trigger a change in the Pod Template. 12) Just as before, describe the Deployment, ReplicaSet and one of the Pods. Note the `Events` and `Controlled By` fields. It should present a clear picture of relationship between objects during an update of a Deployment. ``` -$ kubectl describe deploy deploy-example -$ kubectl describe rs deploy-example- -$ kubectl describe pod deploy-example- +kubectl describe deploy deploy-example +kubectl describe rs deploy-example- +kubectl describe pod deploy-example- ``` --- @@ -314,33 +314,33 @@ have not, complete it first before continuing. 1) Use the `rollout` command to view the `history` of the Deployment `deploy-example`. ``` -$ kubectl rollout history deployment deploy-example +kubectl rollout history deployment deploy-example ``` There should be two revisions. One for when the Deployment was first created, and another when the additional Labels were added. The number of revisions saved is based off of the `revisionHistoryLimit` attribute in the Deployment spec. 2) Look at the details of a specific revision by passing the `--revision=` flag. ``` -$ kubectl rollout history deployment deploy-example --revision=1 -$ kubectl rollout history deployment deploy-example --revision=2 +kubectl rollout history deployment deploy-example --revision=1 +kubectl rollout history deployment deploy-example --revision=2 ``` Viewing the specific revision will display a summary of the Pod Template. 3) Choose to go back to revision `1` by using the `rollout undo` command. ``` -$ kubectl rollout undo deployment deploy-example --to-revision=1 +kubectl rollout undo deployment deploy-example --to-revision=1 ``` **Tip:** The `--to-revision` flag can be omitted if you wish to just go back to the previous configuration. 4) Immediately watch the Pods. ``` -$ kubectl get pods --show-labels --watch +kubectl get pods --show-labels --watch ``` They will cycle through rolling back to the previous revision. 5) Describe the Deployment `deploy-example`. ``` -$ kubectl describe deployment deploy-example +kubectl describe deployment deploy-example ``` The events will describe the scaling back of the previous and switching over to the desired revision. @@ -402,56 +402,56 @@ spec: nodeType: edge containers: - name: nginx - image: nginx:stable-alpine + image: twalter/openshift-nginx ports: - - containerPort: 80 + - containerPort: 8081 ``` **Command** ``` -$ kubectl create -f manifests/ds-example.yaml --record +kubectl create -f manifests/ds-example.yaml --record ``` 2) View the current DaemonSets. ``` -$ kubectl get daemonset +kubectl get daemonset ``` As there are no matching nodes, no Pods should be scheduled. 
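Because the DaemonSet only schedules Pods to nodes matching its `nodeSelector`, it can help to inspect the node labels first. Both commands below are plain `kubectl`; the label value assumes the single-node minikube cluster used throughout these tutorials:
```
kubectl get nodes --show-labels
kubectl get nodes -l nodeType=edge
```
The second command should return nothing until the label is applied in the next step.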
3) Label the `minikube` node with `nodeType=edge`.
```
-$ kubectl label node minikube nodeType=edge
+kubectl label node minikube nodeType=edge
```

4) View the current DaemonSets once again.
```
-$ kubectl get daemonsets
+kubectl get daemonsets
```
There should now be a single instance of the DaemonSet `ds-example` deployed.

5) View the current Pods and display their labels with `--show-labels`.
```
-$ kubectl get pods --show-labels
+kubectl get pods --show-labels
```
Note that the deployed Pod has a `controller-revision-hash` label. This is used like the `pod-template-hash` in a
Deployment to track and allow for rollback functionality.

6) Describing the DaemonSet will provide you with status information regarding its deployment cluster wide.
```
-$ kubectl describe ds ds-example
+kubectl describe ds ds-example
```
**Tip:** `ds` can be substituted for `daemonset` when using `kubectl`.

7) Update the DaemonSet by adding a few additional labels to the Pod Template and use the `--record` flag.
```
-$ kubectl apply -f manifests/ds-example.yaml --record
+kubectl apply -f manifests/ds-example.yaml --record
  < or >
-$ kubectl edit ds ds-example --record
+kubectl edit ds ds-example --record
```

8) Watch the Pods and be sure to show the labels.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
The old version of the DaemonSet will be phased out one at a time and instances of the new version will take their
place. Similar to Deployments, DaemonSets have their own equivalent to a Deployment's `strategy` in the form of
@@ -479,33 +479,33 @@ the previous exercise [Managing DaemonSets](#exercise-managing-daemonsets) and i

1) Use the `rollout` command to view the `history` of the DaemonSet `ds-example`.
```
-$ kubectl rollout history ds ds-example
+kubectl rollout history ds ds-example
```
There should be two revisions. One for when the DaemonSet was first created, and another when the additional Labels
were added. The number of revisions saved is based off of the `revisionHistoryLimit` attribute in the DaemonSet spec.

2) Look at the details of a specific revision by passing the `--revision=` flag.
```
-$ kubectl rollout history ds ds-example --revision=1
-$ kubectl rollout history ds ds-example --revision=2
+kubectl rollout history ds ds-example --revision=1
+kubectl rollout history ds ds-example --revision=2
```
Viewing the specific revision will display the Pod Template.

3) Choose to go back to revision `1` by using the `rollout undo` command.
```
-$ kubectl rollout undo ds ds-example --to-revision=1
+kubectl rollout undo ds ds-example --to-revision=1
```
**Tip:** The `--to-revision` flag can be omitted if you wish to just go back to the previous configuration.

4) Immediately watch the Pods.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
They will cycle through rolling back to the previous revision.

5) Describe the DaemonSet `ds-example`.
```
-$ kubectl describe ds ds-example
+kubectl describe ds ds-example
```
The events will be sparse with a single host, however in an actual deployment they will describe the status of
updating the DaemonSet cluster wide, cycling through hosts one-by-one.
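The DaemonSet counterpart to a Deployment's `strategy` referenced above is the `updateStrategy` stanza. A minimal sketch of one under the DaemonSet `spec`, assuming the `RollingUpdate` type (the values are illustrative, not copied from `ds-example.yaml`):
```yaml
spec:
  updateStrategy:
    type: RollingUpdate # the alternative type is OnDelete
    rollingUpdate:
      maxUnavailable: 1 # replace the Pod on one node at a time
```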
@@ -570,9 +570,9 @@ spec:
     spec:
       containers:
       - name: nginx
-        image: nginx:stable-alpine
+        image: twalter/openshift-nginx
         ports:
-        - containerPort: 80
+        - containerPort: 8081
         volumeMounts:
         - name: www
           mountPath: /usr/share/nginx/html
@@ -589,12 +589,12 @@ spec:

**Command**
```
-$ kubectl create -f manifests/sts-example.yaml
+kubectl create -f manifests/sts-example.yaml
```

2) Immediately watch the Pods being created.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
Unlike Deployments or DaemonSets, the Pods of a StatefulSet are created one-by-one, going by their ordinal index.
Meaning, `sts-example-0` will be fully provisioned before `sts-example-1` starts up. Additionally, take notice of
@@ -604,38 +604,38 @@ Template and enables rollback functionality.

3) More information about the state of the StatefulSet can be gleaned by describing it.
```
-$ kubectl describe statefulset sts-example
+kubectl describe statefulset sts-example
```
Within the events, notice that it is creating claims for volumes before each Pod is created.

4) View the current Persistent Volume Claims.
```
-$ kubectl get pvc
+kubectl get pvc
```
The StatefulSet controller creates a volume for each instance based off the `volumeClaimTemplates`. It prepends the
volume name to the Pod name. e.g. `www-sts-example-0`.

5) Update the StatefulSet's Pod Template and add a few additional labels.
```
-$ kubectl apply -f manifests/sts-example.yaml --record
+kubectl apply -f manifests/sts-example.yaml --record
  < or >
-$ kubectl edit statefulset sts-example --record
+kubectl edit statefulset sts-example --record
```

6) Return to watching the Pods.
```
-$ kubectl get pods --show-labels
+kubectl get pods --show-labels
```
None of the Pods are updated to the new version of the Pod Template.

7) Delete the `sts-example-2` Pod.
```
-$ kubectl delete pod sts-example-2
+kubectl delete pod sts-example-2
```

8) Immediately get the Pods.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
The new `sts-example-2` Pod should be created with the new additional labels. The `OnDelete` Update Strategy will
not spawn a new iteration of the Pod until the previous one is **deleted**. This allows for manually gating the
update process for the StatefulSet.

9) Update the StatefulSet and change the Update Strategy Type to `RollingUpdate`.
```
-$ kubectl apply -f manifests/sts-example.yaml --record
+kubectl apply -f manifests/sts-example.yaml --record
  < or >
-$ kubectl edit statefulset sts-example --record
+kubectl edit statefulset sts-example --record
```

10) Immediately watch the Pods once again.
```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
```
Note that the Pods are sequentially updated in descending order, or largest to smallest based on the
Pod's ordinal index. This means that if `sts-example-2` was not updated already, it would be updated first, then
@@ -658,24 +658,24 @@ Pod's ordinal index. This means that if `sts-example-2` was not updated already,

11) Delete the StatefulSet `sts-example`.
```
-$ kubectl delete statefulset sts-example
+kubectl delete statefulset sts-example
```

12) View the Persistent Volume Claims.
```
-$ kubectl get pvc
+kubectl get pvc
```
Created PVCs are **NOT** garbage collected automatically when a StatefulSet is deleted. They must be reclaimed
independently of the StatefulSet itself.

13) Recreate the StatefulSet using the same manifest.
 13) Recreate the StatefulSet using the same manifest.
 ```
-$ kubectl create -f manifests/sts-example.yaml --record
+kubectl create -f manifests/sts-example.yaml --record
 ```

 14) View the Persistent Volume Claims again.
 ```
-$ kubectl get pvc
+kubectl get pvc
 ```
 Note that new PVCs were **NOT** provisioned. The StatefulSet controller assumes that if a PVC with a matching
 name is present, it is intended to be used for the associated Pod.
@@ -710,32 +710,32 @@ spec:
     app: stateful
   ports:
   - protocol: TCP
-    port: 80
-    targetPort: 80
+    port: 8081
+    targetPort: 8081
 ```

 **Command**
 ```
-$ kubectl create -f manifests/service-sts-example.yaml
+kubectl create -f manifests/service-sts-example.yaml
 ```

 2) Describe the newly created Service.
 ```
-$ kubectl describe svc app
+kubectl describe svc app
 ```
 Notice that it does not have a `clusterIP`, but does have the Pod Endpoints listed. Headless services are unique
 in this behavior.

 3) Query the DNS entry for the `app` service.
 ```
-$ kubectl exec sts-example-0 -- nslookup app.default.svc.cluster.local
+kubectl exec sts-example-0 -- nslookup app.default.svc.cluster.local
 ```
 An A record will have been returned for each instance of the StatefulSet. Querying the service directly will do
 simple DNS round-robin load-balancing.

 4) Finally, query one of the instances directly.
 ```
-$ kubectl exec sts-example-0 -- nslookup sts-example-1.app.default.svc.cluster.local
+kubectl exec sts-example-0 -- nslookup sts-example-1.app.default.svc.cluster.local
 ```
 This is a feature unique to StatefulSets; it allows services to interact directly with a specific instance of a
 Pod. If the Pod is updated and obtains a new IP, the DNS record will immediately point to it, enabling consistent
@@ -802,12 +802,12 @@ spec:

 **Command**
 ```
-$ kubectl create -f manifests/job-example.yaml
+kubectl create -f manifests/job-example.yaml
 ```

 2) Watch the Pods as they are being created.
 ```
-$ kubectl get pods --show-labels --watch
+kubectl get pods --show-labels --watch
 ```
 Only two Pods are provisioned at a time, adhering to the `parallelism` attribute. This is done until the total
 number of `completions` is satisfied. Additionally, the Pods are labeled with `controller-uid`, which acts as a
@@ -818,17 +818,17 @@ This is intentional to better support troubleshooting.

 3) A summary of these events can be seen by describing the Job itself.
 ```
-$ kubectl describe job job-example
+kubectl describe job job-example
 ```

 4) Delete the Job.
 ```
-$ kubectl delete job job-example
+kubectl delete job job-example
 ```

 5) View the Pods once more.
 ```
-$ kubectl get pods
+kubectl get pods
 ```
 The Pods will now be deleted. They are cleaned up when the Job itself is removed.
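The `parallelism` and `completions` attributes referenced in this exercise are top-level fields of the Job spec. A minimal sketch of that shape, where the `completions` value, container name, image, and command are illustrative and not necessarily those used in `manifests/job-example.yaml`:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: job-sketch            # hypothetical name
spec:
  parallelism: 2              # run at most two Pods at a time
  completions: 4              # stop once four Pods have succeeded
  template:
    spec:
      restartPolicy: Never    # Jobs require Never or OnFailure
      containers:
      - name: worker
        image: alpine:3.7
        command: ["/bin/sh", "-c", "sleep 10"]
```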
@@ -875,19 +875,19 @@ spec:

 **Command**
 ```
-$ kubectl create -f manifests/cronjob-example.yaml
+kubectl create -f manifests/cronjob-example.yaml
 ```

 2) Give it some time to run, and then list the Jobs.
 ```
-$ kubectl get jobs
+kubectl get jobs
 ```
 There should be at least one Job named in the format `<cronjob-name>-<timestamp>`. Note the timestamp of the
 oldest Job.

 3) Give it a few minutes, and then list the Jobs once again.
 ```
-$ kubectl get jobs
+kubectl get jobs
 ```
 The oldest Job should have been removed. The CronJob controller will purge Jobs according to the
 `successfulJobsHistoryLimit` and `failedJobsHistoryLimit` attributes. In this case, it is retaining strictly the
@@ -895,13 +895,13 @@ last 3 successful Jobs.

 4) Describe the CronJob `cronjob-example`.
 ```
-$ kubectl describe cronjob cronjob-example
+kubectl describe cronjob cronjob-example
 ```
 The events will show the records of the creation and deletion of the Jobs.

 5) Edit the CronJob `cronjob-example`, locate the `suspend` field, and set it to `true`.
 ```
-$ kubectl edit cronjob cronjob-example
+kubectl edit cronjob cronjob-example
 ```
 This will prevent the CronJob from firing off any future Jobs, and is useful for initially troubleshooting an
 issue without having to delete the CronJob directly.

 6) Delete the CronJob.
 ```
-$ kubectl delete cronjob cronjob-example
+kubectl delete cronjob cronjob-example
 ```
 Deleting the CronJob **WILL** delete all child Jobs. Use `suspend` to _'stop'_ the CronJob temporarily if
 attempting to troubleshoot.

diff --git a/workloads/manifests/deploy-example.yaml b/workloads/manifests/deploy-example.yaml
index e1cb636..cedb9fd 100644
--- a/workloads/manifests/deploy-example.yaml
+++ b/workloads/manifests/deploy-example.yaml
@@ -20,8 +20,8 @@ spec:
     spec:
       containers:
       - name: nginx
-        image: nginx:stable-alpine
+        image: twalter/openshift-nginx
         ports:
-        - containerPort: 80
+        - containerPort: 8081

diff --git a/workloads/manifests/ds-example.yaml b/workloads/manifests/ds-example.yaml
index 393d23b..6ee5b66 100644
--- a/workloads/manifests/ds-example.yaml
+++ b/workloads/manifests/ds-example.yaml
@@ -16,6 +16,6 @@ spec:
         nodeType: edge
       containers:
       - name: nginx
-        image: nginx:stable-alpine
+        image: twalter/openshift-nginx
         ports:
-        - containerPort: 80
+        - containerPort: 8081

diff --git a/workloads/manifests/pod-rs-example.yaml b/workloads/manifests/pod-rs-example.yaml
index 2c0eb3c..4295821 100644
--- a/workloads/manifests/pod-rs-example.yaml
+++ b/workloads/manifests/pod-rs-example.yaml
@@ -8,6 +8,6 @@ metadata:
 spec:
   containers:
   - name: nginx
-    image: nginx:stable-alpine
+    image: twalter/openshift-nginx
     ports:
-    - containerPort: 80
+    - containerPort: 8081

diff --git a/workloads/manifests/rs-example.yaml b/workloads/manifests/rs-example.yaml
index 0d42103..38087e5 100644
--- a/workloads/manifests/rs-example.yaml
+++ b/workloads/manifests/rs-example.yaml
@@ -16,6 +16,6 @@ spec:
     spec:
       containers:
       - name: nginx
-        image: nginx:stable-alpine
+        image: twalter/openshift-nginx
         ports:
-        - containerPort: 80
+        - containerPort: 8081

diff --git a/workloads/manifests/service-sts-example.yaml b/workloads/manifests/service-sts-example.yaml
index 56cc329..5da4a04 100644
--- a/workloads/manifests/service-sts-example.yaml
+++ b/workloads/manifests/service-sts-example.yaml
@@ -8,5 +8,5 @@ spec:
     app: stateful
   ports:
   - protocol: TCP
-    port: 80
-    targetPort: 80
+    port: 8081
+    targetPort: 8081

diff --git a/workloads/manifests/sts-example.yaml b/workloads/manifests/sts-example.yaml
index 76d0cd2..c0935b6 100644
--- a/workloads/manifests/sts-example.yaml
+++ b/workloads/manifests/sts-example.yaml
@@ -18,9 +18,9 @@ spec:
     spec:
       containers:
       - name: nginx
-        image: nginx:stable-alpine
+        image: twalter/openshift-nginx
         ports:
-        - containerPort: 80
+        - containerPort: 8081
         volumeMounts:
         - name: www
           mountPath: /usr/share/nginx/html
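A final sanity check for the manifest changes above: every example now uses `twalter/openshift-nginx`, which, per the port changes, serves on `8081` instead of `80`. One way to verify this against a running Pod (a sketch, assuming the `sts-example-0` Pod from the StatefulSet exercise is still running) is to port-forward and then request the page from a second terminal:

```
kubectl port-forward sts-example-0 8081:8081
curl http://127.0.0.1:8081/
```

If the port change took effect, the default nginx page should be returned on `8081`.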