diff --git a/ansible-101/.gitignore b/ansible-101/.gitignore index 80185ac..5c88495 100644 --- a/ansible-101/.gitignore +++ b/ansible-101/.gitignore @@ -7,3 +7,7 @@ *.retry core* *_python_intro.ipynb +notebooks/rendered_notebooks/ +vault.yml +vault.yaml +.pin diff --git a/ansible-101/Dockerfile.ssh b/ansible-101/Dockerfile.ssh index 6f2ca1b..89df961 100644 --- a/ansible-101/Dockerfile.ssh +++ b/ansible-101/Dockerfile.ssh @@ -1,7 +1,15 @@ -FROM itscaro/debian-ssh:latest -RUN apt-get -y update && apt-get -y install python sshpass +FROM debian:stable-slim +RUN apt-get -y update && apt-get -y install python sshpass openssh-server RUN apt-get -y clean RUN apt-get -y autoremove +# Since this is a tutorial machine, we +# need to simplify things a bit. RUN echo "UseDNS no" >> /etc/ssh/sshd_config RUN echo "PermitRootLogin yes" >> /etc/ssh/sshd_config + +# Create privilege separation directory. +RUN mkdir -p /run/sshd + +ENTRYPOINT ["/usr/sbin/sshd"] +CMD [ "-D", "-e"] diff --git a/ansible-101/Makefile b/ansible-101/Makefile index c0df8b6..e692fa1 100644 --- a/ansible-101/Makefile +++ b/ansible-101/Makefile @@ -6,7 +6,7 @@ .PHONY: course MAIN_IP = $(shell ip -4 -o a s scope global | awk -F'[/ ]+' '{print $$4; exit} ') -EXTERNAL_COURSES = git-101 python-basic +EXTERNAL_COURSES = git-101 python-basic docker-101 course: .prepare diff --git a/ansible-101/README.md b/ansible-101/README.md index e66c906..2556d33 100644 --- a/ansible-101/README.md +++ b/ansible-101/README.md @@ -6,16 +6,16 @@ This course is based on Docker and Docker compose. As long as you have Docker you can run it on: - - linux - - mac - - windows +- linux +- mac +- windows Docker should be exposed via TCP on 172.17.0.1:2375 On Linux, set -``` -# vim /etc/systemd/system/multi-user.target.wants/docker.service +```bash +$ vim /etc/systemd/system/multi-user.target.wants/docker.service [Service] ... 
ExecStart=/usr/bin/dockerd -H fd:// -H tcp://172.17.0.1:2375 @@ -24,28 +24,29 @@ ExecStart=/usr/bin/dockerd -H fd:// -H tcp://172.17.0.1:2375 On Mac, check [the FAQ and this issue](https://github.com/docker/for-mac/issues/770#issuecomment-252560286) - Consider having enough bandwidth for the first run to download the images. - Run the environemnt with: - # make course +```bash +make course +``` or - # docker-compose scale course=1 bastion=1 web=3 - # firefox http://localhost:8888 - +```bash +docker-compose scale course=1 bastion=1 web=3 +firefox http://localhost:8888 +``` ## Playing the course on DigitalOcean DigitalOcean is a great and cheap cloud-provider - - create an Ubuntu 16.04 docker droplet from the menu and ssh into your host - - expose docker on local http port +- create an Ubuntu 16.04 docker droplet from the menu and ssh into your host +- expose docker on local http port -``` +```bash # vim /etc/systemd/system/multi-user.target.wants/docker.service [Service] ... @@ -53,33 +54,48 @@ ExecStart=/usr/bin/dockerd -H fd:// -H tcp://172.17.0.1:2375 ... ``` - - clone and run the project +- clone and run the project +```bash +git clone https://github.com/ioggstream/python-course.git +cd python-course/ansible-101 +make course ``` - # git clone https://github.com/ioggstream/python-course.git - # cd python-course/ansible-101 - # make course - -``` - - - point to the reference url +- point to the reference url See asciicast here [![asciicast](https://asciinema.org/a/9xqX4akNND7Yc0Q1sTb3ZnEhI.png)](https://asciinema.org/a/9xqX4akNND7Yc0Q1sTb3ZnEhI) +## Prerequisites + +After `make course`, prerequisites are in `/notebooks/rendered_notebooks`. + +Prerequisites can be found in the home directory: + +- Introduction to jupyter: logging in, showing the python interface, working with notebooks, opening terminals. 
+- [Git 101](https://github.com/ioggstream/python-course/blob/master/git-101/notebooks/01-git.ipynb) +- [Python basics](https://github.com/ioggstream/python-course/blob/master/python-basic/README.md) +- Docker ## Outline +- 1. Prerequisites linked in [intro.ipynb](intro.ipynb) +- Ansible architecture +- Describe delivery layout in ansible.cfg +- Host and Group variables, Filters + +- 2. Static and dynamic inventories (docker) +- Vaults and Secrets +- Use bastions and other ssh_opts + +- 3. writing basic playbooks, test driven deployment +- YAML pitfalls +- Inclusion and Roles +- Ansible galaxy as a role repository + +## Advanced topics - - Ansible architecture - - Describe delivery layout in ansible.cfg - - Host and Group variables, Filters - - Static and dynamic inventories (docker) - - Vaults and Secrets - - Use bastions and other ssh_opts - - writing basic playbooks, test driven deployment - - Yaml pitfalls - - Inclusion and Roles - - Ansible galaxy as a role repository (bonus track) +- 4. AWX Introduction +- Example usage of AWX diff --git a/ansible-101/cloudinit.txt b/ansible-101/cloudinit.txt new file mode 100644 index 0000000..46a9e9a --- /dev/null +++ b/ansible-101/cloudinit.txt @@ -0,0 +1,12 @@ +#!/bin/bash +# This is NOT a cloud-init file +# as https://docs.digitalocean.com/products/droplets/how-to/provide-user-data/ suggests, +# we may insert a payload script. 
+ +# If we had cloud-init file, we could have used this: +# package_update: false +# package_upgrade: false + +# but we are simply rewriting this config file +echo "APT::Periodic::Update-Package-Lists \"0\";" > /etc/apt/apt.conf.d/20auto-upgrades +echo "APT::Periodic::Unattended-Upgrade \"0\";" >> /etc/apt/apt.conf.d/20auto-upgrades diff --git a/ansible-101/deleteme-digitalocean.yml b/ansible-101/deleteme-digitalocean.yml new file mode 100644 index 0000000..86279e6 --- /dev/null +++ b/ansible-101/deleteme-digitalocean.yml @@ -0,0 +1,26 @@ +# +# Delete the current infrastructure on digitalocean +# $ export DO_API_TOKEN=xxxx +# $ ansible-playbook -v deleteme-digitalocean.yml +# +# This will output a series of jupyter urls with their associate ips. +# +# BEWARE: unless you populate accordingly the digital_ocean droplet ids +# the digital_ocean module will create those droplets over and +# over again. +# +- hosts: localhost + gather_facts: false + tasks: + - name: Delete machine for the course. + community.digitalocean.digital_ocean_droplet: + state: absent + name: "{{item}}" + unique_name: yes + region: fra1 + image: docker-18-04 + wait_timeout: 500 + register: my_droplet + with_items: + - deleteme-1 + - deleteme-2 diff --git a/ansible-101/notebooks/00_teaser.ipynb b/ansible-101/notebooks/00_teaser.ipynb index 2a4a719..8341c00 100644 --- a/ansible-101/notebooks/00_teaser.ipynb +++ b/ansible-101/notebooks/00_teaser.ipynb @@ -1,17 +1,12 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Ansible\n", - "\n", - "\n", - "Ansible is a configuration management software.\n", - "\n", - "Connects via ssh or docker to a list of inventory machines and executes a series of tasks eventually grouped in playbooks.\n", - "\n", - "At first, create an inventory file with all your hosts." 
+ "#### ([exercise directory](/tree/notebooks/exercise-00))" ] }, { @@ -23,6 +18,20 @@ "cd /notebooks/exercise-00 " ] }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "Ansible is a configuration management software.\n", + "\n", + "Connects via ssh or docker to a list of inventory machines and executes a series of tasks eventually grouped in playbooks.\n", + "\n", + "At first, create an inventory file with all your hosts." + ] + }, { "cell_type": "code", "execution_count": null, @@ -33,6 +42,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -63,6 +73,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -70,6 +81,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -105,6 +117,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -151,6 +164,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -178,6 +192,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -206,6 +221,15 @@ "!ansible-playbook -i inventory site.yml --limit=course # in this case the --limit does not change anything ;)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!ansible --version" + ] + }, { "cell_type": "code", "execution_count": null, @@ -230,7 +254,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.15" + "version": "2.7.14" } }, "nbformat": 4, diff --git a/ansible-101/notebooks/01_architecture.ipynb b/ansible-101/notebooks/01_architecture.ipynb index 162db31..6471831 100644 --- a/ansible-101/notebooks/01_architecture.ipynb +++ b/ansible-101/notebooks/01_architecture.ipynb @@ -1,120 +1,129 @@ { "cells": [ { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Architecture\n", + 
"#### ([exercise directory](/tree/notebooks/exercise-00))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "solution": "hidden" + }, + "outputs": [], + "source": [ + "cd /notebooks/exercise-00/" + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Ansible is\n", "\n", - "\n", - " - configuration manager \n", - " - simple \n", - " - extensible via modules\n", - " - written in python\n", - " - broad community\n", - " - many external tools\n", - " - playbook repository\n", - " - used by openstack, openshift & tonns of project\n", - " \n", - " \n", + "- configuration manager \n", + "- simple \n", + "- extensible via modules\n", + "- written in python\n", + "- broad community\n", + "- many external tools\n", + "- playbook repository\n", + "- used by openstack & tonns of project" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ " # Configuration Manager\n", " \n", - "Explain infrastructure as code\n", - " " + "Explain infrastructure as code (e.g., remember `docker-compose`?):\n", + "\n", + "- managing data centers through machine-readable definition files;\n", + "- install, configure, decommission physical and virtual resources;\n", + "- avoid manual processes;\n", + "- speed ++, costs --, risk -- (see [Google SRE Book](https://sre.google/workbook/postmortem-culture/) );\n", + "- declarative (describe the expected state) or imperative (describe the steps) approaches;\n", + "- push or pull model;\n", + "\n", + 
"[![](https://mermaid.ink/img/pako:eNrNVMFu2zAM_RVBhyEB4sB2Yjfxhh7W9RB0RYclp0U9KDaTGLElT5LbuXH-fZQVdw3QATvuYpP0I_keKetIU5kBTajneUyY3BSQkIXYKq6NqlNTKyBckxsEMdFhdopXe_L1OxNMZPA0GDD6QWx09dE9tzzZcq_WoIgzC14ZWb2FsC7xbNPh0Ba-Jq2DWzpeVReFp-BnDdog2rrk7LZoVHI9-INGgA3p3EjVoKNTlVdGo2WgrApuwNp7qY0ePrpeIHa5AKvAWQSVkfYaYQ93Lckv5HdkLaD9hN_vHto-uU8de7LdgSEZN7wli9Xyfm0fZ_k4iKfXWeyAK40cdL1xY3Q11gt-Q25duUsgF7xodK672ZMnXuTYJZfimO4hPZxcOIOqkE0JwlhSmZX12uBSy_pys9ggyxWktiJZfcZBK5mtGf2Gr7qLouJ3ZLzj9Dg7hA3XvY6_uD1agHmW6uA9I4uM_GOM0Ue7Wm2Q6codkP-T43kXRS4OS9MUQCYERy8PkFjEm3jYx3cKADPoiJagSp5n-GMemSCEUbOHEhhN0Mxgy-vCMMrECaF1hZThNrPnnya4WxhRXhu5bETa-w7zJed4KkqKdAuN0YqLH1KWPQhdmhzpL5oE0-l46kdR4E_DyWzuz0e0oUk4j8ZxNI8nYRyH4Sy4Oo3oS5fvj-MgCv0oiGdX4dSP_WBEoSN0726X7pI5_QYwkpLJ?type=png)](https://mermaid.live/edit#pako:eNrNVMFu2zAM_RVBhyEB4sB2Yjfxhh7W9RB0RYclp0U9KDaTGLElT5LbuXH-fZQVdw3QATvuYpP0I_keKetIU5kBTajneUyY3BSQkIXYKq6NqlNTKyBckxsEMdFhdopXe_L1OxNMZPA0GDD6QWx09dE9tzzZcq_WoIgzC14ZWb2FsC7xbNPh0Ba-Jq2DWzpeVReFp-BnDdog2rrk7LZoVHI9-INGgA3p3EjVoKNTlVdGo2WgrApuwNp7qY0ePrpeIHa5AKvAWQSVkfYaYQ93Lckv5HdkLaD9hN_vHto-uU8de7LdgSEZN7wli9Xyfm0fZ_k4iKfXWeyAK40cdL1xY3Q11gt-Q25duUsgF7xodK672ZMnXuTYJZfimO4hPZxcOIOqkE0JwlhSmZX12uBSy_pys9ggyxWktiJZfcZBK5mtGf2Gr7qLouJ3ZLzj9Dg7hA3XvY6_uD1agHmW6uA9I4uM_GOM0Ue7Wm2Q6codkP-T43kXRS4OS9MUQCYERy8PkFjEm3jYx3cKADPoiJagSp5n-GMemSCEUbOHEhhN0Mxgy-vCMMrECaF1hZThNrPnnya4WxhRXhu5bETa-w7zJed4KkqKdAuN0YqLH1KWPQhdmhzpL5oE0-l46kdR4E_DyWzuz0e0oUk4j8ZxNI8nYRyH4Sy4Oo3oS5fvj-MgCv0oiGdX4dSP_WBEoSN0726X7pI5_QYwkpLJ)\n" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, + "source": [ + "\n", + "Trick: use a light development environment based on the introductory session.\n", + "\n", + 
"[![](https://mermaid.ink/img/pako:eNp1Uk1v2zAM_SsCD0MCxEVqp67jFru0OwxoL91tVjEoNpMYtSVNH0HdJP99lK1g62EwRJDy4-MjqSPUqkEoIUkSLl3rOizZd7k1wjrja-cNMmHZA4G4HDE7I_SePb1wGb4GD7MZhy9yY_XdZLei3IrEWzRscjuhndL_QviYGH2YzwOT9ZuJ2qBW1QsZ2zplhkgSZJK7Cf6uda_E0BqsXavkKCZk_apmuhPDRqk3OydErxrfoa1m0bnk68HtlQwIlE3sInT3lZ3-Fku077rE4G-P1pHgELIYnkaRUwrKXSvxUwdEh_JQPeKB_h4-TYFq1qqn3rB6UNIJSjWW2KOyRtVv49z-G71GtVF5JGO0mtP9KVRmXMICejS9aBta7JFLxji4PfbIoSS3wa3wnePA5ZmgXjfC4bcmTBtK2jouQHinfgyyvsQT5rEV1GAPpKazdKuF_KlUfwFRCOUR3qG8zourPF2u8tt0nRbrfFUsYKDroriis86DSW-y2-y8gI-RYUn4LE2z6_Rmna3y5ZIycJT0PL3P8Zme_wCRge2O?type=png)](https://mermaid.live/edit#pako:eNp1Uk1v2zAM_SsCD0MCxEVqp67jFru0OwxoL91tVjEoNpMYtSVNH0HdJP99lK1g62EwRJDy4-MjqSPUqkEoIUkSLl3rOizZd7k1wjrja-cNMmHZA4G4HDE7I_SePb1wGb4GD7MZhy9yY_XdZLei3IrEWzRscjuhndL_QviYGH2YzwOT9ZuJ2qBW1QsZ2zplhkgSZJK7Cf6uda_E0BqsXavkKCZk_apmuhPDRqk3OydErxrfoa1m0bnk68HtlQwIlE3sInT3lZ3-Fku077rE4G-P1pHgELIYnkaRUwrKXSvxUwdEh_JQPeKB_h4-TYFq1qqn3rB6UNIJSjWW2KOyRtVv49z-G71GtVF5JGO0mtP9KVRmXMICejS9aBta7JFLxji4PfbIoSS3wa3wnePA5ZmgXjfC4bcmTBtK2jouQHinfgyyvsQT5rEV1GAPpKazdKuF_KlUfwFRCOUR3qG8zourPF2u8tt0nRbrfFUsYKDroriis86DSW-y2-y8gI-RYUn4LE2z6_Rmna3y5ZIycJT0PL3P8Zme_wCRge2O)\n", + "\n", + "Ansible uses the push model and supports both declarative and imperative approaches." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "solution": "hidden" + }, "source": [ " # Advantages\n", " \n", " No agents: ansible copies python and all deployment scripts/modules to the target machine via ssh and executes them remotely. 
Some modules though require that target hosts contain specific python libraries.\n", " \n", " Jobs are executed in parallel, but you can configure for serialization using different strategies for speed up, rollout or other purposes: (link)\n", - " \n", - " ![Ansible architecture](https://cdn.pbrd.co/images/390yWc9H0.png \"Ansible architecture\")\n", - " \n", + " \n", " Authentication can be passwordless (ssh/pki, kerberos) or with password.\n", " \n", - " Automation jobs (Playbooks) are described via YAML - a very concise and simple language. You can validate and lint files with yamllint and ansible-lint.\n", + " Automation jobs (Playbooks) are described via YAML - a very concise and simple language.\n", + " You can validate and lint files with yamllint and [ansible-lint](https://github.com/ansible/ansible-lint).\n", " \n", - "```\n", + "```yaml\n", "this_is:\n", " a: yaml\n", "\n", "file:\n", "- with dict\n", "- a list\n", - " \n", - " \n", "```\n", " \n", - " Passwords are supported, but SSH keys with ssh-agent are one of the best ways to use Ansible. Though if you want to use Kerberos, that's good too. \n", + "Passwords are supported, but SSH keys with ssh-agent are one of the best ways to use Ansible. Though if you want to use Kerberos, that's good too. \n", " \n", "You have a lot of options! Root logins are not required, you can login as any user, and then su or sudo to any user." 
] }, { "cell_type": "code", - "execution_count": 1, - "metadata": { - "solution": "hidden" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/notebooks/exercise-00\n" - ] - } - ], - "source": [ - "cd /notebooks/exercise-00/" - ] - }, - { - "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - ".\r\n", - "├── ansible.cfg\r\n", - "├── group_vars\r\n", - "│   ├── all\r\n", - "│   └── staging\r\n", - "├── inventory\r\n", - "├── mytemplate.j2\r\n", - "├── python-course-test.yml\r\n", - "├── site.yml\r\n", - "└── staging\r\n", - "\r\n", - "1 directory, 8 files\r\n" - ] - } - ], + "outputs": [], "source": [ "# Let's check our ansible directory\n", "!tree" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## ansible.cfg \n", "\n", - "It's the main configuration file. While all ansible are in `yaml`, ansible.cfg is in .ini format. Eg.\n", + "It's the main configuration file. While all ansible are in YAML, `ansible.cfg` is in .ini format. Eg.\n", "\n", "```\n", + "# This is a .ini file.\n", "[stanza]\n", "key = value\n", - "\n", "```\n", "\n", "Let's check the content of a sample ansible.cfg:\n", @@ -126,96 +135,31 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "#\r\n", - "# Always use ansible.cfg and inventory files to \r\n", - "# describe your deployment! 
\r\n", - "#\r\n", - "# This will save time and helps your colleagues to\r\n", - "# keep in touch with the environment.\r\n", - "#\r\n", - "[defaults]\r\n", - "# define our inventory file or ansible defaults\r\n", - "# to /etc/ansible/hosts.\r\n", - "inventory = inventory\r\n", - "\r\n", - "# It's just for testing purposes (or if you're on a secure cloud)\r\n", - "# you may want to avoid typing yes to all host keys.\r\n", - "# Once you get host keys, you can comment this out and re-enable\r\n", - "# checks.\r\n", - "host_key_checking = False\r\n", - "\r\n", - "# When an ansible plabook fails, it creates a retry file which may\r\n", - "# pollute the current directory ;) You can move this out using\r\n", - "# further variables\r\n", - "retry_files_enabled = False\r\n", - "\r\n", - "# Search password file in here\r\n", - "# vault_password_file = ~/.ssh/pin\r\n", - "\r\n", - "\r\n", - "# [ssh_connection]\r\n", - "# ssh_args = -F ./ssh_config/config -o ControlMaster=auto -o ControlPersist=30m\r\n", - "# control_path = ./ssh_config/ansible-%%r@%%h:%%p\r\n" - ] - } - ], + "outputs": [], "source": [ "!cat ansible.cfg" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Inventories\n", "\n", - "a simple inventory file contains a static list of nodes to contact.\n", + "A simple inventory file contains a static list of nodes to contact.\n", "\n", - "Generally, an [inventory can be static or dynamic](http://docs.ansible.com/ansible/intro_inventory.html), as we will see in the following lessons.\n", + "Generally, an [inventory can be static or dynamic](https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html), as we will see in the following lessons.\n", "\n" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "#\r\n", - "# This inventory file contains a list of server to \r\n", - "# play with - divided in 
groups.\r\n", - "#\r\n", - "[course]\r\n", - "# this is the local machine where you run jupyter\r\n", - "# the tutorial just works with this one.\r\n", - "pythonforsysadmin_course_1 ansible_connection=local\r\n", - "\r\n", - "\r\n", - "# Another group of servers\r\n", - "# where we can pass optional arguments\r\n", - "# Homework: you can play with this group of host\r\n", - "# once you exchange ssh-keys between the pythonforsysadmin_course_1\r\n", - "# container and the pythonforsysadmin_ansible_* ones._\r\n", - "[ansible]\r\n", - "172.17.0.[5:7] \r\n", - "\r\n", - "#\r\n", - "# Besides, ansible has two predefined groups: \r\n", - "# - all \r\n", - "# - ungrouped\r\n" - ] - } - ], + "outputs": [], "source": [ "!cat inventory" ] @@ -231,6 +175,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -238,34 +183,27 @@ "\n", "**N.B. ansible environment variables are not related with process environment**\n", "\n", + "#### Exercise:\n", + "\n", + "- using [the terminal](/terminals/ps) identify the process executing this notebook by the current working directory `/proc/*/cwd`;\n", + "- check the environment variables of the process with `cat /proc//environ | od -a`;\n", + "\n", "You defined your host groups in the environment, eg:\n", "\n", " - course\n", " - ansible\n", " - staging\n", " \n", - "Ansible defines [two default groups: all and ungrouped](http://docs.ansible.com/ansible/intro_inventory.html#default-groups).\n", + "Ansible defines [two default groups: all and ungrouped](http://docs.ansible.com/ansible/latest/intro_inventory.html#default-groups).\n", "\n", "You can assign variables to all hosts using the `all` group." 
] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "group_vars\r\n", - "├── all\r\n", - "└── staging\r\n", - "\r\n", - "0 directories, 2 files\r\n" - ] - } - ], + "outputs": [], "source": [ "# group_vars - a directory containing environment files for various host groups.\n", "!tree group_vars" @@ -273,22 +211,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "group_vars/all:env_name: default\n", - "group_vars/staging:env_name: staging\n", - "#\n", - "# Pur here the variables for the staging inventory.\n", - "#\n", - "env_name: staging\n" - ] - } - ], + "outputs": [], "source": [ "# I set env_name in two different files\n", "!grep env_name -r group_vars/\n", @@ -305,13 +230,14 @@ "source": [ "# The debug module (-m debug) shows variables' content or dumps messages.\n", "# by default uses the inventory set into ansible.cfg, thus writing\n", - "!ansible all -m debug -a 'var=env_name'\n" + "!ansible all -m debug -a 'var=env_name'" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { - "solution": "hidden", + "solution": "shown", "solution_first": true }, "source": [ @@ -330,12 +256,11 @@ "code_folding": [ 0 ], - "solution": "hidden" + "solution": "shown" }, "outputs": [], "source": [ "# Solution\n", - "\n", "!ansible all -i staging -m debug -a 'var=env_name'" ] }, @@ -349,10 +274,15 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "#### Exercise" + "#### Exercise\n", + "\n", + "- Read the inventory and try to predict the output of the following command\n", + "- Rename the `staging` inventory file to `foobar`. What happens?\n", + "- Restore the original name." 
] }, { diff --git a/ansible-101/notebooks/02_delivery_layout.ipynb b/ansible-101/notebooks/02_delivery_layout.ipynb index 9e9af69..ba13836 100644 --- a/ansible-101/notebooks/02_delivery_layout.ipynb +++ b/ansible-101/notebooks/02_delivery_layout.ipynb @@ -1,10 +1,28 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Delivery Layout - ansible.cfg\n", + "#### ([exercise directory](/tree/notebooks/exercise-00))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd /notebooks/exercise-00" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "When you deliver something you'll probably have a layout:\n", "\n", @@ -14,21 +32,13 @@ " - whether to do privilege escalation (eg. sudo, ...) before running tasks\n", " - if nodes should be accessed via a bastion host, docker, ...\n", " \n", - "Put those informations, together with a brief description of the playbook usage (eg. 2/3 lines) into ansible.cfg\n", + "Put those information, together with a brief description of the playbook usage (eg. 
2/3 lines) into `ansible.cfg`\n", "\n", - "![delivery layout](https://cdn.pbrd.co/images/39e3p1vlg.png)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cd /notebooks/exercise-00" + "[![](https://mermaid.ink/img/pako:eNplUz2TmzAQ_SuaLVIBARk4m-KqpEyVLuLmRkYLZg4kRgImjs__PZLAn9eIt0-r3fdW4gSVEggFNJoPB1LKUs586kbmV1LzouZhp6qPt1J-4NEwYw7EgXXLQrtjpv1ynkvT7juMqrphd3hNrtsOiU1X4wH12ui9UrJum1LyaTxcA5TCSbnWbeWMclT6yK7ovub3NWg6tUenJ2GDVmJlDeoZtaMpG9GMX-gN84qiKHreetbRaDUN7zPXht3gRYnqBOpHLc5tMy9qPFwU3CrfDykMXz811qhRVmg-b6adIRJGIbGlnIsV0-ttrXNzFYgnHsbpaXdnXxs-deyVmDo05BvRyn6ZXx_svUEAPeqet8I-mlMpCSnBDq_HEgoLBda-P5TybFOnQfARf4rW2oBi1BMGYJWp30dZXeIl50fL7Yh7sK06Y9mByz9K9ZckG0Jxgr9QJGkapXGWJXFKN9tdvAvgCAXdZVGe7fINzXNKt8nLOYB__nwc5UlG4yzJty80jfM4CQC9oF_Ly_c_wPk_d6YQxA?type=png)](https://mermaid.live/edit#pako:eNplUz2TmzAQ_SuaLVIBARk4m-KqpEyVLuLmRkYLZg4kRgImjs__PZLAn9eIt0-r3fdW4gSVEggFNJoPB1LKUs586kbmV1LzouZhp6qPt1J-4NEwYw7EgXXLQrtjpv1ynkvT7juMqrphd3hNrtsOiU1X4wH12ui9UrJum1LyaTxcA5TCSbnWbeWMclT6yK7ovub3NWg6tUenJ2GDVmJlDeoZtaMpG9GMX-gN84qiKHreetbRaDUN7zPXht3gRYnqBOpHLc5tMy9qPFwU3CrfDykMXz811qhRVmg-b6adIRJGIbGlnIsV0-ttrXNzFYgnHsbpaXdnXxs-deyVmDo05BvRyn6ZXx_svUEAPeqet8I-mlMpCSnBDq_HEgoLBda-P5TybFOnQfARf4rW2oBi1BMGYJWp30dZXeIl50fL7Yh7sK06Y9mByz9K9ZckG0Jxgr9QJGkapXGWJXFKN9tdvAvgCAXdZVGe7fINzXNKt8nLOYB__nwc5UlG4yzJty80jfM4CQC9oF_Ly_c_wPk_d6YQxA)" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -36,7 +46,7 @@ "\n", "When running ansible, the first file read is ansible.cfg, resolved in the following order:\n", "\n", - " - `ANSIBLE_CONFIG` (env var)\n", + " - `ANSIBLE_CONFIG` (process environment variable)\n", " - `./ansible.cfg` (in the current directory)\n", " - `~/ansible.cfg` (in the home directory)\n", " - `/etc/ansible/ansible.cfg`\n", @@ -54,12 +64,13 @@ "\n", "```\n", "\n", - "Always check [ansible source code](https://raw.github.com/ansible/ansible/devel/examples/ansible.cfg) to get in 
touch with new parameters.\n", + "Always check [ansible source code](https://raw.github.com/ansible/ansible/devel/examples/ansible.cfg) to get in touch with new parameters, for example see the [ansible.cfg stable-2.4](https://github.com/ansible/ansible/blob/stable-2.4/examples/ansible.cfg)\n", "\n", - "We'll create a new ansible.cfg for every project!" + "We'll create a new `ansible.cfg` for every project!" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -90,6 +101,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "solution": "shown", @@ -107,30 +119,31 @@ "cell_type": "code", "execution_count": null, "metadata": { - "code_folding": [], "solution": "shown" }, "outputs": [], "source": [ - "# Solution\n", - "!sed -i 's/^inventory/#inventory/' ansible.cfg\n", - "!ansible -m ping all\n", - "!sed -i 's/#inventory/inventory/' ansible.cfg" + "# Use this cell for the exercise\n", + "!ansible -m ping all" ] }, { "cell_type": "code", "execution_count": null, "metadata": { + "code_folding": [], "solution": "shown" }, "outputs": [], "source": [ - "# Use this cell for the exercise\n", - "!ansible -m ping all" + "# Solution\n", + "!sed -i 's/^inventory/#inventory/' ansible.cfg\n", + "!ansible -m ping all\n", + "!sed -i 's/#inventory/inventory/' ansible.cfg" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "solution": "hidden", @@ -148,29 +161,29 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "code_folding": [ - 0 - ], - "solution": "hidden" - }, + "metadata": {}, "outputs": [], "source": [ - "# Solution\n", - "!ansible -m ping all[0]" + "# Use this cell for the exercise" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "code_folding": [ + 0 + ], + "solution": "hidden" + }, "outputs": [], "source": [ - "# Use this cell for the exercise\n", + "# Solution\n", "!ansible -m ping all[0]" ] }, { + "attachments": {}, "cell_type": "markdown", 
"metadata": {}, "source": [ @@ -182,6 +195,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -215,12 +229,11 @@ }, "outputs": [], "source": [ - "# Write here the answer!\n", - "[defaults] # ansible.cfg\n", - "private_key_file = " + "# Write here the answer!" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -238,6 +251,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -260,6 +274,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -268,7 +283,7 @@ "A bastion host is the unique management entrypoint for an infrastructure.\n", "\n", "Ansible *leverages ssh functionalities* to manage resources from your local machine thru a bastion.\n", - "With a proper configuration you can run your commands/playbooks without continusly moving files to and fro your bastion.\n", + "With a proper configuration you can run your commands/playbooks **without** continusly moving files to and fro your bastion.\n", "\n", "Those includes:\n", " \n", @@ -281,6 +296,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -303,21 +319,21 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.13" + "pygments_lexer": "ipython3", + "version": "3.9.9" } }, "nbformat": 4, diff --git a/ansible-101/notebooks/02_vaults.ipynb b/ansible-101/notebooks/02_vaults.ipynb index ba1832e..6e381f6 100644 --- a/ansible-101/notebooks/02_vaults.ipynb +++ b/ansible-101/notebooks/02_vaults.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, 
"source": [ @@ -11,15 +12,16 @@ " - ansible.cfg\n", " - inventory\n", "\n", + "[![](https://mermaid.ink/img/pako:eNpVkUFTgzAQhf9KZg-eKAMp0MLBk3rSi94kjpPC0mYKCZMAim3_uwFKxdt7mW_37WZPkKkcIYG95vWBPL8yyaRpd5Pl0ohdiW5W7NOFJgVPCr4qRInkg0mU-VC1BFar-7PGAjXKDM2ZDOhnx9uyIXeTOWJvrBayQ9ko3f_LHQjTmwar9GmQi0Cb99csnVrW3JgvpfMrVqrsOGNDTGrMgdRadLxBYh-umFWWug2Q3tQYf4UM6g71vCQ4UKGuuMjtj52YJIRBc8AKGSRW5lgM8zBg8mLRts5t4mMubFNIGt2iA7xt1Fsvs9lPzIPgdvEKbGJp7GvN5btS1QxZC8kJviHxg8ANvDD0vYCut7EXO9BDQuPQjcI4WtMoonTrby4O_Iz1nhv5IfVCP9puaOBFnu8AjgO9TGcfr3_5BZh-syk?type=png)](https://mermaid.live/edit#pako:eNpVkUFTgzAQhf9KZg-eKAMp0MLBk3rSi94kjpPC0mYKCZMAim3_uwFKxdt7mW_37WZPkKkcIYG95vWBPL8yyaRpd5Pl0ohdiW5W7NOFJgVPCr4qRInkg0mU-VC1BFar-7PGAjXKDM2ZDOhnx9uyIXeTOWJvrBayQ9ko3f_LHQjTmwar9GmQi0Cb99csnVrW3JgvpfMrVqrsOGNDTGrMgdRadLxBYh-umFWWug2Q3tQYf4UM6g71vCQ4UKGuuMjtj52YJIRBc8AKGSRW5lgM8zBg8mLRts5t4mMubFNIGt2iA7xt1Fsvs9lPzIPgdvEKbGJp7GvN5btS1QxZC8kJviHxg8ANvDD0vYCut7EXO9BDQuPQjcI4WtMoonTrby4O_Iz1nhv5IfVCP9puaOBFnu8AjgO9TGcfr3_5BZh-syk)\n", "\n", "We're going to describe the following setup:\n", "\n", - " - direct access to client machines (eg. no bastion)\n", - " - one `inventory` file\n", - " - no host_key_check\n", - " - no retry files\n", - " - client machine username/password is `root:root`\n", - " - no public key is installed by default\n", + " - direct access to client machines (eg. no bastion);\n", + " - one `inventory` file;\n", + " - no host_key_check;\n", + " - no retry files;\n", + " - client machines' username is `root`. 
The password will be generated via the ansible `lookup()` plugin;\n", + " - no public key is installed by default.\n", "\n" ] }, @@ -33,16 +35,17 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Preparation\n", "\n", - "The first steps include creating:\n", + "The preparation steps include creating:\n", "\n", - " - all secret files (ssh identity, vault pin file)\n", - " - ansible.cfg (the deployment descriptor) \n", - " - and the inventory." + "1. all secret files (ssh keypair, vault pin file)\n", + "1. the inventory\n", + "1. [`ansible.cfg`](/edit/notebooks/exercise-01/ansible.cfg) (the deployment descriptor) referencing the created files" ] }, { @@ -51,17 +54,60 @@ "metadata": {}, "outputs": [], "source": [ - "# At first create a proper ssh key for the project\n", + "# Prepare a deployment descriptor referencing the .pin file.\n", + "# Exercise: open ansible.cfg in the editor add the missing comment lines.\n", + "!cat ansible.cfg" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "✅ Create a new ssh keypair for the project:\n", + "\n", + "- cleanup previously existing keys;\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Remove existing keys.\n", "! rm id_ansible.pub id_ansible -rf" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "- the key filename is `id_ansible`;\n", + "- the algorithm is `ed25519`, which is the most efficient one. For further information on ed25519 see [RFC8032](https://datatracker.ietf.org/doc/html/rfc8032).\n", + "\n", + "\n" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "! test -f id_ansible || ssh-keygen -q -t ecdsa -f id_ansible -N ''" + "# Eventually create the new keypair.\n", + "! 
test -f id_ansible || ssh-keygen -q -t ed25519 -f id_ansible -N ''" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "✅ Create the vault password file named `.pin` using the `lookup` plugin. We'll use this password to encrypt the vault later on.\n", + "**NB: if ansible.cfg references a missing or invalid `vault_password_file`, this task will fail.**\n" ] }, { @@ -70,8 +116,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Now a `secret` password ;) We'll reference this in ansible.cfg\n", - "!echo secret > .pin " + "# Use the `lookup` plugin to create a password file named `.pin`.\n", + "!ansible localhost -m copy -a \"content='{{ lookup('password', '/dev/null length=64') }}' dest=$PWD/.pin\"" ] }, { @@ -80,11 +126,42 @@ "metadata": {}, "outputs": [], "source": [ - "# then prepare a deployment descriptor referencing the .pin file\n", - "!cat ansible.cfg" + "# Let's show the vault password :)\n", + "! cat .pin" ] }, { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "✅ Uncomment the vault_password_file in [ansible.cfg](/edit/notebooks/exercise-01/ansible.cfg)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Uncomment the vault_password_file line in ansible.cfg\n", + "! 
sed -i 's/#vault_password_file/vault_password_file/' ansible.cfg" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Exercise\n", + "\n", + "What is the meaning of the `-q` and `-N` options of `ssh-keygen`?\n", + "\n", + "Hint: open a [terminal](/terminals/man) and use `man ssh-keygen`.\n" + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -104,14 +181,76 @@ ] }, { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- use the `copy` module to generate the `vault.yaml` file with the following content\n", + "\n", + "```yaml\n", + "root_password: \"\"\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Solution\n", + "!ansible localhost -mcopy \\\n", + " -a \"content='root_password: \\\"{{ lookup('password','/dev/null') }}\\\"' dest=$PWD/vault.yaml\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "- use the `ansible_connection=docker` method specified in `inventory.docker` to copy the ssh key to the docker containers. 
Without this, the ssh key will not be available to the docker containers and you cannot access them via ssh.\n", + "\n", + "In real infrastructures, the ssh keys are usually provisioned at boot.\n", + "\n", + "[![](https://mermaid.ink/img/pako:eNp1U1Fr2zAQ_itCDyMFK8xKnLmmFArdw6CFsr3NCkORLo2pLQVJbpsl-e87W3bTdBQ_6O6-T3efPll7qqwGWlDGmDChCjUU5Mb4alUDUdYYUKGyhjQQNlZ7YXreo5PbDbn7KYwwvl3FVFv1BK6MC1nLVbGWLGbLd7St1CXBgkpL7B9kZcClSO_Yql1BB_ETxM8hn04mPx7IuMFAeLHuib1UDvTFBeK8x_lnOBgtTBSl_WQSIyEM7vlkx0Auy9uR68E998EozT76D-ddDpN8StiUXROVdtKGmJ_ZJqPZf05mI-g3rDLPYIJ1uxIz0ljd4o3EgTUOWY7K3hEH69-45zfQC_rQmzB2fcDKgaDSL8Tz_7v2lFg8kNG5k4cdPMSku1TsovCgjB2u3ug0oQ24RlYaf7Q9-kYEDRtoQNACQw1r2dZBUGGOSG23Wgb4riucTovgWkiobIP9tTNqzCPntpLoYUPxmLXH6laa39Y2IwlTWuzpKy3SLJ_OUz6bZXnOZxn_xhO6owW_XEwX2eUiz_LZfJ7hd0zo377D12mWUOg13McH0r-T4z9VnhsC?type=png)](https://mermaid.live/edit#pako:eNp1U1Fr2zAQ_itCDyMFK8xKnLmmFArdw6CFsr3NCkORLo2pLQVJbpsl-e87W3bTdBQ_6O6-T3efPll7qqwGWlDGmDChCjUU5Mb4alUDUdYYUKGyhjQQNlZ7YXreo5PbDbn7KYwwvl3FVFv1BK6MC1nLVbGWLGbLd7St1CXBgkpL7B9kZcClSO_Yql1BB_ETxM8hn04mPx7IuMFAeLHuib1UDvTFBeK8x_lnOBgtTBSl_WQSIyEM7vlkx0Auy9uR68E998EozT76D-ddDpN8StiUXROVdtKGmJ_ZJqPZf05mI-g3rDLPYIJ1uxIz0ljd4o3EgTUOWY7K3hEH69-45zfQC_rQmzB2fcDKgaDSL8Tz_7v2lFg8kNG5k4cdPMSku1TsovCgjB2u3ug0oQ24RlYaf7Q9-kYEDRtoQNACQw1r2dZBUGGOSG23Wgb4riucTovgWkiobIP9tTNqzCPntpLoYUPxmLXH6laa39Y2IwlTWuzpKy3SLJ_OUz6bZXnOZxn_xhO6owW_XEwX2eUiz_LZfJ7hd0zo377D12mWUOg13McH0r-T4z9VnhsC)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cat inventory.docker" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Provision the ssh keys using the docker_connector.\n", + "# Why do we need to use double braces?\n", + "!ansible docker -i inventory.docker -m shell \\\n", + " -a \"echo 'root:{{{{root_password}}}}' | chpasswd\" \\\n", + " -e@vault.yaml " + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": 
{}, "source": [ "## Gotta ping 'em all\n", "\n", - "Everything set up now. \n", + "Our Ansible environment is all set, but we still need to check if we can reach all the hosts.\n", "\n", - "Ping all hosts now, eventually adjusting ip ranges in then [inventory](/edit/notebooks/exercise-01/inventory)." + "Let's ping all hosts, adjusting the ip ranges in the [inventory](/edit/notebooks/exercise-01/inventory) if necessary.\n", + "\n", + "Open a [terminal](/terminals/man) and use `docker inspect` to retrieve the container IPs." ] }, { @@ -121,10 +260,11 @@ "outputs": [], "source": [ "# Let's get an error: root_password is UNDEFINED. \n", - "!ansible -m ping all\n" + "!ansible -m ping all" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -141,9 +281,8 @@ "source": [ "# A vault is just a yaml file containing a dictionary of secrets.\n", "# We can put here as many information as we want, but for now\n", - "# just put the `root_password`.\n", - "\n", - "!echo \"root_password: root\" > vault.yml" + "# just set a `root_password` generated using the lookup plugin.\n", + "!cat vault.yaml" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# We need to encrypt it. 
\n", - "!ansible-vault encrypt vault.yml" + "!ansible-vault encrypt vault.yaml" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise\n", "\n", - " - which secret is used to encrypt vault.yml ?\n", + " - which secret is used to encrypt vault.yaml ?\n", " - where is it specified?\n", - " - what happens if you try to re-encrypt the vault.yml" + " - what happens if you try to re-encrypt the vault.yaml" ] }, { @@ -174,7 +314,7 @@ "outputs": [], "source": [ "# And show the anatomy of the vault.\n", - "!cat vault.yml" + "!cat vault.yaml" ] }, { @@ -184,7 +324,7 @@ "outputs": [], "source": [ "# Can we decrypt it?\n", - "!ansible-vault view vault.yml" + "!ansible-vault view vault.yaml" ] }, { @@ -193,10 +333,11 @@ "metadata": {}, "outputs": [], "source": [ - "!ansible -m ping all -e@vault.yml" + "!ansible -m ping all -e@vault.yaml" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -216,12 +357,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## SSH Authentication\n", "\n", - "We want to switch from password to ssh authentication. Create a playbook to install ssh keys: it reads the password from vault.yml\n" + "We want to switch from password to ssh authentication. Create a playbook to install ssh keys: it reads the password from vault.yaml\n" ] }, { @@ -252,6 +394,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "solution": "shown", @@ -292,17 +435,20 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise\n", "\n", "Run ansible in verbose mode to se all the injected ssh argument. 
If the output is too verbose, reduce it either with:\n", - " - `--limit ipaddress` to contact only one node\n", - " - host indexing/subscript eg: `all[0]` " + "\n", + "- `--limit ipaddress` to contact only one node\n", + "- host indexing/subscript eg: `all[0]` " ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -314,9 +460,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -328,7 +474,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.13" + "version": "3.9.9" } }, "nbformat": 4, diff --git a/ansible-101/notebooks/03_facts_and_variables.ipynb b/ansible-101/notebooks/03_facts_and_variables.ipynb index 1a32289..e489012 100644 --- a/ansible-101/notebooks/03_facts_and_variables.ipynb +++ b/ansible-101/notebooks/03_facts_and_variables.ipynb @@ -1,16 +1,12 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Facts and variables\n", - "\n", - "When connecting to an host, ansible creates a dictionary with a huge set of useful informations.\n", - "\n", - "This step is named **fact gathering**. \n", - "\n", - "You can inspect facts using the `setup` module." + "#### ([exercise directory](/tree/notebooks/exercise-03))" ] }, { @@ -22,6 +18,19 @@ "cd /notebooks/exercise-03" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "When connecting to an host, ansible creates a dictionary with a huge set of useful informations.\n", + "\n", + "This step is named **fact gathering**. \n", + "\n", + "You can inspect facts using the `setup` module." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -29,20 +38,20 @@ "outputs": [], "source": [ "# Let's run the setup module\n", - "\n", "!ansible -i inventory -m setup localhost " ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise\n", "\n", - " - run the following command\n", - " - what does it do?\n", - " - where's its output ?\n", - " - use the json module to reindent it" + "Run the following command\n", + "- what does it do?\n", + "- where's its output ?\n", + "- use the json python module to make it readable. **Hint: you need to `import json` to parse the output**" ] }, { @@ -75,12 +84,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise\n", "\n", - "Gather the following infos from the output of the previous command.\n", + "Using python, `print()` the following information from the output of the previous command:\n", "\n", " - hostname\n", " - first ip address\n", @@ -99,13 +109,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise\n", "\n", - "modify [this playbook](/edit/notebooks/exercise-03/get-facts.yml) to print:\n", - "\n", + "modify [this playbook](/edit/notebooks/exercise-03/get-facts.yml) to dump:\n", "\n", " - hostname\n", " - first ip address\n", @@ -123,6 +133,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -130,11 +141,11 @@ "\n", "Essentially, facts are per-host variables gathered by ansible.\n", "\n", - "In the vault lesson we've seen variables in action loaded it via -e @variables.yml.\n", + "In the vault lesson, we've seen variables in action loaded it via `-e @variables.yml`.\n", "\n", "Now we'll define them with \n", "\n", - "```\n", + "```yaml\n", "- hosts: localhost\n", " vars:\n", " - one_variable: 1\n", @@ -144,6 +155,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -157,13 +169,13 @@ "\n", "Here's a template 
example\n", "\n", - "```\n", + "```html\n", "\n", " \n", "\n", "# template.j2\n", "This is a static line while the following one\n", - "expands the ansible_hostname variable {{ansible_hostname}} \n", + "expands the ansible_hostname variable {{ ansible_hostname }} \n", "\n", "Now we process a simple number {{ 3.1415 | int }}\n", "\n", @@ -206,12 +218,23 @@ ] }, { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Extra Exercise\n", + "\n", + "Re-run the above playbook disabling fact gathering setting the process environment variable `ANSIBLE_GATHERING=explicit`." + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## filters\n", "\n", - "Inside {{ braces }} you can process expressions using filters.\n", + "Inside `{{` braces `}}` you can process expressions using filters.\n", "\n", "```\n", "\n", @@ -266,6 +289,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -273,12 +297,12 @@ "\n", "Edit [the last section of vars-and-facts.yml](/edit/notebooks/exercise-03/vars-and-facts.yml) so that it shows:\n", " \n", - " - the free percentage of every device\n", + "- the free percentage of every block device\n", "\n", "Hints:\n", " \n", - " - use the playbook as a reference\n", - " - iterate thru server facts" + "- use the playbook as a reference\n", + "- iterate thru server facts" ] }, { @@ -295,7 +319,11 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# Check the difference between the output of ansible\n", + "# and the one of the OS.\n", + "!df -h" + ] } ], "metadata": { @@ -314,7 +342,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.13" + "version": "2.7.14" } }, "nbformat": 4, diff --git a/ansible-101/notebooks/04_loops_and_conditions.ipynb b/ansible-101/notebooks/04_loops_and_conditions.ipynb index b23cbbe..af3476a 100644 --- 
a/ansible-101/notebooks/04_loops_and_conditions.ipynb +++ b/ansible-101/notebooks/04_loops_and_conditions.ipynb @@ -1,10 +1,28 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Loops\n", + "#### ([exercise directory](/tree/notebooks/exercise-03))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd /notebooks/exercise-03" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "To repeat a *single task* multiple times, you can use *`with_items`*.\n", "\n", @@ -64,15 +82,7 @@ ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cd /notebooks/exercise-03" - ] - }, - { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -123,7 +133,7 @@ "\n", "Remember on `when` and `with_items`\n", "\n", - "#### `when` statements should not include jinja2 templating delimiters such as {{ }} or {% %}\n", + "#### `when` statements should not include jinja2 templating delimiters such as {{ }} or {% %} because they are evaluated as python code.\n", "\n", "#### `with_items` [requires proper templating and braces](http://docs.ansible.com/ansible/porting_guide_2.0.html#other-caveats). 
Do it for all expressions everywhere except conditionals (`when`):\n" ] @@ -138,6 +148,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -149,10 +160,12 @@ "- hosts: localhost \n", " tags: assert\n", " tasks: \n", - " - fail: msg=\"This is Linux!\"\n", + " - fail:\n", + " msg: \"This is Linux!\"\n", " when: ansible_system != 'Linux'\n", "\n", - " - fail: msg=\"Stop processing now!\"\n", + " - fail:\n", + " msg: \"Stop processing now!\"\n", " when: ansible_architecture == 'x86_64'\n", "\n", "```" @@ -168,6 +181,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -191,6 +205,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -198,13 +213,10 @@ "\n", "There is a set of predefined variables too:\n", "\n", - " - group_names\n", - " - groups\n", - " - environment \n", - " \n", - "When using playbooks, we'll get this one too:\n", - "\n", - " - hostvars\n" + " - `group_names`\n", + " - `groups`\n", + " - `environment`\n", + " - `hostvars`\n" ] }, { @@ -213,10 +225,11 @@ "metadata": {}, "outputs": [], "source": [ - "!ansible all[0] -i ../web -m debug -a \"var=groups\"" + "!ansible all[0] -i web -m debug -a \"var=groups\"" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -238,6 +251,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -287,6 +301,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/ansible-101/notebooks/05_inventories.ipynb b/ansible-101/notebooks/05_inventories.ipynb index 33286d6..8df02b5 100644 --- a/ansible-101/notebooks/05_inventories.ipynb +++ b/ansible-101/notebooks/05_inventories.ipynb @@ -1,97 +1,55 @@ { "cells": [ { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Inventories \n", + "#### ([exercise directory](/tree/notebooks/exercise-05))" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd /notebooks/exercise-05" + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Inventories\n", "\n", "Inventories are a fundamental doc entrypoint for our infrastructures. \n", "\n", "They contain a lot of informations, including:\n", " \n", - " - ansible_user\n", - " - configuration variables in [group_name:vars]\n", - " - host grouping eg. by geographical zones in [group_name:children]\n", + "- `ansible_user`\n", + "- configuration variables in `[group_name:vars]`\n", + "- host grouping eg. by geographical zones in `[group_name:children]`\n", " \n", " \n", "Files:\n", "\n", - "\n", - " - [inventory](/edit/notebooks/exercise-05/inventory)\n", + "- [open the inventory](/edit/notebooks/exercise-05/inventory) file or print it\n", " " ] }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/notebooks/exercise-05\n" - ] - } - ], - "source": [ - "cd /notebooks/exercise-05" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# I can group hosts in inventory\r\n", - "\r\n", - "[web:children]\r\n", - "web_rome\r\n", - "web_milan\r\n", - "\r\n", - "[web_rome:children]\r\n", - "web_rome_test\r\n", - "web_rome_prod\r\n", - "\r\n", - "[web_rome_prod]\r\n", - "172.23.0.[3:4]\r\n", - "\r\n", - "[web_milan]\r\n", - "172.24.0.[5:6]\r\n", - "\r\n", - "# further host variables\r\n", - "[web_rome:vars]\r\n", - "ansible_username=root \r\n", - "\r\n", - "\r\n", - "# Connect with docker \r\n", - "[web_rome_test:vars]\r\n", - "ansible_connection=docker\r\n", - "ansible_docker_extra_args=\"-Htcp://172.17.0.1\"\r\n", - "\r\n", - "# The actual host reference\r\n", - "[web_rome_test]\r\n", - "ansible101_web_1\r\n", - "\r\n", - "# \r\n", - "# Don't need to be Ssh \r\n", - "# to 
be my local machine ;)\r\n", - "#\r\n", - "[course]\r\n", - "localhost ansible_connection=local " - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "!cat inventory" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -108,6 +66,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -115,7 +74,7 @@ "\n", "Use `ansible` to show:\n", " \n", - " - all hosts of the web group." + "- all hosts of the web group." ] }, { @@ -139,10 +98,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Inventory scripts" + "## Inventory scripts\n", + "\n", + "To create custom inventory scripts just use python ;) and set the script path in `inventory`." ] }, { @@ -151,11 +113,11 @@ "metadata": {}, "outputs": [], "source": [ - "#To create custom inventory scripts just use python ;) and set it in\n", "!grep inventory ansible.cfg # inventory = ./docker-inventory.py" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -164,6 +126,14 @@ "in the official ansible documentation find at least 3 `ansible_connection=docker` parameters " ] }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An example python inventory script." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -188,20 +158,31 @@ "c = Client(base_url=\"http://172.17.0.1:2375/")\n", "\n", "# Define a function to make it clear!\n", - "container_fmt = lambda x: (\n", - " x['Names'][0][1:],\n", - " x['Labels']['com.docker.compose.service'], \n", - " x['NetworkSettings']['Networks']['bridge']['IPAddress'],\n", - ")\n", + "def get_inventory_data(container):\n", + " return {\n", + " \"container_name\": container[\"Names\"][0][1:],\n", + " \"ip_address\": container[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"IPAddress\"],\n", + " \"group_name\": container[\"Labels\"].get(\"com.docker.compose.service\"),\n", + " }\n", "\n", - "for x in c.containers():\n", + "for container in c.containers():\n", + " # The following exercises will ask you to\n", + " # modify the code in the loop in order to create a correct inventory.\n", " try:\n", - " print(*container_fmt(x), sep='\\t\\t')\n", + " print(get_inventory_data(container))\n", " except KeyError:\n", " # skip non-docker-compose containers\n", " pass" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "JSON inventories" + ] + }, { "cell_type": "code", "execution_count": null, @@ -232,26 +213,28 @@ "outputs": [], "source": [ "# You can pass variables to generated inventories too\n", - "inventories['web']['host_vars'] = {\n", + "inventories['web']['vars'] = {\n", " 'ansible_ssh_common_args': ' -o GSSApiAuthentication=no'\n", "}\n", "print(json.dumps(inventories, indent=1))" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise: \n", "\n", - "Reuse the code in [inventory-docker.py](/edit/notebooks/exercise-05/inventory-docker.py) to print a json inventory that:\n", + "Reuse the code in [inventory-docker.py](/edit/notebooks/exercise-05/inventory-docker.py) to print a JSON inventory that:\n", "\n", - " - connects via docker to \"web\" hosts\n", - " - connects via ssh to \"ansible\" hosts 
\n", + "- connects via docker to \"web\" hosts\n", + "- connects via ssh to \"ansible\" hosts \n", "\n", "Test it in the cell below.\n", "\n", - "** NOTE: there's a [docker inventory](https://github.com/ansible/ansible/blob/devel/contrib/inventory/docker.py) script shipped with ansible **" + "**NOTE: there's a [docker inventory](https://github.com/ansible-community/contrib-scripts/blob/main/inventory/docker.py) script shipped with ansible**\n", + "\n" ] }, { @@ -264,6 +247,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "code_folding": [] @@ -284,15 +268,17 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Configurations\n", "\n", - "You may want to split inventory files and separate prod and test environment." + "You may want to split inventory files and separate production and test environments." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -316,6 +302,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -337,6 +324,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -358,6 +346,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -374,6 +363,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -384,6 +374,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -391,28 +382,34 @@ "\n", "Host vars can be used in automated or cloud deployments where:\n", "\n", - " - every new host or vm, at boot, populate its own entries in `host_vars` (Eg. via file)\n", - " - ansible is run after that setup and uses `host_vars` to configure the server and expose that values to the other machines." 
+ " - every new host or vm, at boot, populate its own entries in `host_vars` (e.g., via file)\n", + " - ansible is run after that setup, and uses `host_vars` to configure the server and to expose that values to the other machines." ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.14" + "pygments_lexer": "ipython3", + "version": "3.9.9" } }, "nbformat": 4, diff --git a/ansible-101/notebooks/06_bastion_and_ssh.ipynb b/ansible-101/notebooks/06_bastion_and_ssh.ipynb index ae8602f..af6db31 100644 --- a/ansible-101/notebooks/06_bastion_and_ssh.ipynb +++ b/ansible-101/notebooks/06_bastion_and_ssh.ipynb @@ -1,47 +1,82 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Bastion hosts\n", + "#### ([exercise directory](/tree/notebooks/exercise-06))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd /notebooks/exercise-06/" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "There are many reasons for using bastion hosts:\n", "\n", - " - security access eg in cloud environment\n", - " - vpn eg via windows hosts\n", + "- security access eg in cloud environment\n", + "- vpn eg via Windows hosts\n", " \n", - "The latter case is quite boring as ansible doesn't support windows as a client platform.\n", + "The latter case is not interesting as [ansible doesn't support windows as a client 
platform](https://docs.ansible.com/ansible/latest/os_guide/windows_faq.html#does-ansible-work-with-windows-xp-or-server-2003).\n", "\n", "A standard approach is:\n", "\n", - " - have a ssh server or a proxy installed on the bastion\n", - " - connecto the bastion to the remote network (eg. via vpn)\n", - " - configure ssh options in ansible to connect thru the bastion\n", + "- to have an ssh server or a proxy installed on the bastion\n", + "- connect the bastion to the remote network (eg. via vpn)\n", + "- configure ssh options in ansible to connect through the bastion\n", " \n", "We'll do this via two configuration files:\n", "\n", - " - a standard ssh_config where we put the passthru configuration\n", - " - a simple ansible.cfg referencing ssh_config\n", + "- a standard `ssh_config` where we put the \"passthru\" configuration\n", + "- a simple `ansible.cfg` referencing `ssh_config`\n", " \n", "This approach allows us:\n", "\n", - " 1. to test the standard ssh connection thru the bastion without messing with ansible\n", - " 2. keep ansible.cfg simple in case we want to reuse them from the intranet (Eg. without traversing the bastion)\n", + "1. to test the standard ssh connection thru the bastion without messing with ansible\n", + "2. keep `ansible.cfg` simple in case we want to reuse them from the intranet (Eg. 
without traversing the bastion)\n", " \n", - " " + "[![](https://mermaid.ink/img/pako:eNp1UsuOmzAU_RXrLqoZCSwwCSEsKlVqF5XaTbsrjioHLolVuI7AaDLD8O81wekgjbq6r8N5CI9Qmgohh1OnLmf27YckSWWjkezDA14tdqQaKelsestqldcqHHrswhOq7vFxBivq9bFBXtanYtV7cK0bPEjq-_Pv0lCtT8Vbu4awkIcf2dttZu6H4-LK0xaflnq4O5w_Mmwt-o4FqVoREdriKy2h5uHJdH-8Cz-FT7rD6rAEjtkHNlfha-L5VoxV-1IwBz-q3mpD4yjBt5IWYrRnp4eWxZuYi4RvEn59f9uJ-Rbx2LHDNEly239qPiELw_DVhXtlXmJesP8YhQBa7FqlK_dzR0mMSXBqLUrIXVthrYbGSpA0OehwqZTFL5W2poPcdgMGoAZrfj5TeZ8XzGetXPIWnP2md9uLol_GtHeQGyEf4Qq5yFKeiWifxdtkm0ZZtgngGfI423GRZtF2t0_cLYrFFMDLjSHi6T5O0iTdC5G5GiUB4M3S9-WN3p7q9BcAcurg?type=png)](https://mermaid.live/edit#pako:eNp1UsuOmzAU_RXrLqoZCSwwCSEsKlVqF5XaTbsrjioHLolVuI7AaDLD8O81wekgjbq6r8N5CI9Qmgohh1OnLmf27YckSWWjkezDA14tdqQaKelsestqldcqHHrswhOq7vFxBivq9bFBXtanYtV7cK0bPEjq-_Pv0lCtT8Vbu4awkIcf2dttZu6H4-LK0xaflnq4O5w_Mmwt-o4FqVoREdriKy2h5uHJdH-8Cz-FT7rD6rAEjtkHNlfha-L5VoxV-1IwBz-q3mpD4yjBt5IWYrRnp4eWxZuYi4RvEn59f9uJ-Rbx2LHDNEly239qPiELw_DVhXtlXmJesP8YhQBa7FqlK_dzR0mMSXBqLUrIXVthrYbGSpA0OehwqZTFL5W2poPcdgMGoAZrfj5TeZ8XzGetXPIWnP2md9uLol_GtHeQGyEf4Qq5yFKeiWifxdtkm0ZZtgngGfI423GRZtF2t0_cLYrFFMDLjSHi6T5O0iTdC5G5GiUB4M3S9-WN3p7q9BcAcurg) " ] }, { - "cell_type": "code", - "execution_count": null, + "attachments": {}, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "cd /notebooks/exercise-06/" + "## SSH basics\n", + "\n", + "SSH connections follows roughly this basic schema (more information in [RFC4254](https://www.rfc-editor.org/rfc/rfc4253) and further revisions):\n", + "\n", + "1. client connects to server\n", + "2. server sends its public key\n", + "3. client checks the public key against its `known_hosts` file\n", + "4. if the key is not found, the client asks the user to confirm the key\n", + "5. if the key is confirmed, the client saves it in `known_hosts`\n", + "6. client sends its public key\n", + "\n", + "### SSH Caveats\n", + "\n", + "Host key checking is important. 
Incorrect validation can lead to:\n", + "\n", + "- persisting an insecure key in your `known_hosts` file\n", + "- connecting to the wrong host, which could be a malicious one, leaking credentials and other sensitive information\n", + "- connecting to the wrong host (e.g., because someone assigned a wrong IP), and executing commands on it" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -60,6 +95,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -70,24 +106,24 @@ "\n", "Inhibit PKI authentication is insecure by design:\n", "\n", - " - passwords will surely ends in cleartext files\n", - " - people ends doing things like the following\n", + "- passwords will end up on post-its\n", + "- people will save them in cleartext files, doing things like the following\n", " \n", - "```\n", + "```bash\n", "#\n", "# the password is sent to the bastion via a\n", "# cleartext file.\n", "Match Host 172.25.0.*\n", - " ProxyCommand sshpass -f cleartext-bastion-password ssh -F config jump@bastion -W %h:%p \n", + " ProxyCommand sshpass -f /home/user/.cleartext-bastion-password ssh -F config jump@bastion -W %h:%p \n", "\n", "```\n", "\n", "\n", "## Connect to the bastion\n", "\n", - "Test connectivity to the bastion. Check your host ips and modify [`ssh_config`](/edit/notebooks/exercise-06/ssh_config) accordingly.\n", + "Test connectivity to the bastion. 
Check your host IPs and modify [`ssh_config`](/edit/notebooks/exercise-06/ssh_config) accordingly.\n", "\n", - "** Replace ALL bastion occurrencies, including the one below the BEWARE note**" + "**Replace ALL bastion IP address occurrences, including the one below the BEWARE note**" ] }, { @@ -97,10 +133,11 @@ "outputs": [], "source": [ "fmt=r'{{.NetworkSettings.IPAddress}}'\n", - "!docker -H tcp://172.17.0.1:2375 inspect ansible101_bastion_1 --format {fmt} # pass variables *before* commands ;)" + "!docker -H tcp://172.17.0.1:2375 inspect ansible-101_bastion_1 --format {fmt} # pass variables *before* commands ;)" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -108,29 +145,31 @@ "\n", "Write the [ssh-copy-id.yml](/edit/notebooks/exercise-06/ssh-copy-id.yml) playbook to install an ssh key to the bastion.\n", "\n", + "NOTE: a cooler version of this exercise includes generating random passwords for `root` and for the vault using the `lookup()` plugin,\n", + "like we did in 02_vaults.\n", "\n", - "Bastion credentials are:\n", + "Set the bastion credentials to:\n", "\n", - " - user: `root`\n", - " - password `root`\n", + "- user: `root`\n", + "- password `root`\n", " \n", "Try to do it without watching the previous exercises:\n", "\n", - " - modify the empty [`ansible.cfg`](/edit/notebooks/exercise-06/ansible.cfg)\n", - " * referencing a pin file \n", - " * passing [`[ssh_connection]`](http://docs.ansible.com/ansible/intro_configuration.html#openssh-specific-settings) arguments to avoid ssh key mismatches\n", - " * pointing to the local inventory\n", - " - store credentials in the encrypted [`vault.yml`](/edit/notebooks/exercise-06/vault.yml). 
\n", - " - provide an [`inventory`](/edit/notebooks/exercise-06/inventory) file \n", + "- modify the empty [`ansible.cfg`](/edit/notebooks/exercise-06/ansible.cfg)\n", + " * referencing the vault password file named `pin` \n", + " * passing [`[ssh_connection]`](http://docs.ansible.com/ansible/intro_configuration.html#openssh-specific-settings) arguments to avoid ssh key mismatches\n", + " * pointing to the local inventory\n", + "- store credentials in the encrypted [`vault.yml`](/edit/notebooks/exercise-06/vault.yml). \n", + "- provide an [`inventory`](/edit/notebooks/exercise-06/inventory) file \n", "\n", "You can reuse the old id_ansible key or:\n", "\n", - " - create a new one and adjust the reference in [` ssh_config`](/edit/notebooks/exercise-06/ssh_config) \n", + "- create a new one and adjust the reference in [` ssh_config`](/edit/notebooks/exercise-06/ssh_config) \n", " \n", "Hint:\n", "\n", - " - if you provide an IdentityFile, password authentication won't work on the `bastion` node;\n", - " - you *must* copy ssh id file using password authentication and eventually clean up your known_host file" + "- if you provide an `IdentityFile`, password authentication won't work on the `bastion` node;\n", + "- you *must* copy ssh id file using password authentication and eventually clean up your `known_hosts` file" ] }, { @@ -161,6 +200,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -170,18 +210,19 @@ "\n", "We can instead just set\n", "\n", - "```\n", + "```ini\n", "[ssh_connection]\n", "ssh_args = -F ssh_config\n", "```\n", "\n", - "Write everything in ssh_config.\n", + "Write everything in `ssh_config`.\n", "\n", "\n", "Connecting via bastion in ansible enforcing multiple references to ssh_config" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -198,7 +239,7 @@ "outputs": [], "source": [ "fmt=r'{{.NetworkSettings.IPAddress}}'\n", - "!docker -H tcp://172.17.0.1:2375 inspect 
ansible101_web_1 --format {fmt} # pass variables *before* commands ;)" + "!docker -H tcp://172.17.0.1:2375 inspect ansible-101_web_1 --format {fmt} # pass variables *before* commands ;)" ] }, { @@ -207,20 +248,21 @@ "metadata": {}, "outputs": [], "source": [ - "!ssh -F ssh_config root@172.17.0.4 ip -4 -o a # get host ip" + "!ssh -F ssh_config root@172.17.0.4 env # get host ip" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "#### Exercise\n", "\n", - "Configure your `ansible.cfg` so that every web host is accessed via the bastion.\n", + "Configure your [`ansible.cfg`](/edit/notebooks/exercise-06/ansible.cfg) so that every host in the `web` group is accessed via the bastion.\n", "\n", - " - recycle your dynamic inventory script to access web hosts\n", - " - your id_ansible key should already be on your web hosts \n", - " - use ansible -m ping to check host connectivity\n", + " - recycle your dynamic inventory script to access the hosts in the `web` group\n", + " - your id_ansible key should already be on your hosts \n", + " - use `ansible -m ping` to check host connectivity\n", " - run `ps -ef | grep ssh` on your docker host to check all the `ProxyCommand` processes." ] }, diff --git a/ansible-101/notebooks/07_playbooks.ipynb b/ansible-101/notebooks/07_playbooks.ipynb index 3833f94..5a1155f 100644 --- a/ansible-101/notebooks/07_playbooks.ipynb +++ b/ansible-101/notebooks/07_playbooks.ipynb @@ -1,30 +1,58 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Playbooks\n", + "#### ([exercise directory](/tree/notebooks/exercise-07))\n", + "#### NB: This lesson requires that the target nodes are reachable via ssh key exchange. Complete lesson 02_vaults (again :) before starting this lesson." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd /notebooks/exercise-07" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "\n", - "As we already learn in the other lessons, playbooks are yaml files where we group tasks.\n", + "As we already learn in the other lessons, playbooks are YAML files where we group **tasks**.\n", "\n", - "A playbook starts with an hosts mark specifying the hosts to run the playbook to.\n", + "A playbook starts with an `hosts` mark specifying the target hosts to run the playbook on.\n", "\n", - "```\n", - "# Yaml files usually start with 3 dashes: ---\n", - "# It's just a separator.\n", + "```yaml\n", + "# Since you can have multiple YAML documents in a single file,\n", + "# YAML files usually start with 3 dashes: ---\n", + "# and may end with 3 dots: ...\n", "---\n", "- hosts: web\n", " tasks: \n", - " ...\n", - "```\n", - "\n", + " ..\n", + "...\n", + "```" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", - "In yaml, a playbook is a *list* of `hosts` entries.\n", + "A playbook is a *list* of `hosts` entries in a single YAML document.\n", "\n", "\n", - "```\n", + "```yaml\n", + "---\n", "- hosts: localhost\n", " tasks:\n", " - name: one or more tasks to be run on localhost\n", @@ -34,11 +62,18 @@ " tasks:\n", " - name: followed by tasks to be run on web hostgroup\n", " ...\n", - "```\n", + "```\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "We can even pick a single element in an host group, or add `tags` entries to restrict executions\n", "\n", - "```\n", + "```yaml\n", "- hosts: db[0]\n", " tags: beware\n", " tasks:\n", @@ -46,25 +81,40 @@ " ...\n", " - name: run many tasks...\n", "\n", - "```\n", + "```\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", - "You can 
limit execution of a single task on a single host within a group using `run_once` .\n", - "```\n", + "You can limit execution of a single task on a single host within a group using `run_once`.\n", + "\n", + "```yaml\n", "- hosts: db\n", " tags: beware\n", " tasks:\n", - " - name: Only this task is run on one of the db nodes\n", + " - name: Only this task is run on one of the db nodes.\n", " run_once: yes\n", " \n", " - name: Other tasks are run on all nodes!\n", - "```\n", + "```\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", - "We can even delegate the execution on a given host (eg. the ansible one)\n", + "We can even delegate the execution on a given host (e.g., the ansible one)\n", "\n", - "```\n", + "```yaml\n", "- hosts: db\n", " tasks:\n", - " - name: run this task from the local ansible host\n", + " - name: Run this task from the local ansible host.\n", " run_once: yes\n", " delegate_to: localhost\n", " shell: |\n", @@ -73,15 +123,7 @@ ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cd /notebooks/exercise-07" - ] - }, - { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -94,8 +136,6 @@ " - curl\n", " - file, copy & fetch\n", "\n", - "Creating small reports.\n", - "\n", "More fun with:\n", "\n", " - iterations\n", @@ -107,6 +147,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -115,16 +156,16 @@ "\n", "When run, a playbook gathers facts about hosts (remember the setup module?).\n", "\n", - "Gathering facts may be time-consuming, so you can [tune it via ansible.cfg](http://docs.ansible.com/ansible/intro_configuration.html#gathering) or disable it\n", + "Gathering facts may be time-consuming, so you can [tune it via ansible.cfg](http://docs.ansible.com/ansible/latest/intro_configuration.html#gathering) or disable it\n", "\n", - "```\n", + "```yaml\n", "- hosts: web\n", " 
gather_facts: no\n", " tasks:\n", " ...\n", "```\n", "\n", - "We can use the [predefined variables](http://docs.ansible.com/ansible/playbooks_variables.html#magic-variables-and-how-to-access-information-about-other-hosts) too:\n", + "We can use the [predefined variables](http://docs.ansible.com/ansible/latest/playbooks_variables.html#magic-variables-and-how-to-access-information-about-other-hosts) too:\n", "\n", " - group_names\n", " - groups\n", @@ -142,6 +183,16 @@ ] }, { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!ansible-playbook debug.yml --tags groups" + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -160,18 +211,20 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "### Hints\n", "\n", " - use `ansible_facts` as possible \n", - " - avoid gathering facts with uname & co\n", - " - test and template your iterations statically instead of continuosly gathering facts\n", + " - avoid gathering facts using system commands like `uname` & co\n", + " - test and template your iterations statically instead of continuosly gathering facts (do you remember lesson [03_facts_and_variables(/notebooks/notebooks/03_facts_and_variables.ipynb)]?) )\n", " " ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -181,9 +234,42 @@ "\n", "Between set variables we have:\n", "\n", - " - hostvars\n", - " - groups\n", - " \n" + "- `groups`\n", + "- `hostvars`\n", + "\n", + "⚠ Fact gathering requires that specific tools are installed on the target host.\n", + "For example, gathering IP addresses requires the `iproute2` package to be installed.\n", + "\n", + "##### Exercise:\n", + "\n", + "Run the following playbook and look at the output of `hostvars`:\n", + "\n", + "1. which major facts are gathered?\n", + "1. 
do you see any network related fact?\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!ansible-playbook hostvars.yml --tags debug" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "##### Exercise\n", + "\n", + "Run the following playbook and:\n", + "\n", + "1. look at the error message: which fact is missing?\n", + "1. Can you gather the missing fact using the `setup` module? Why? **Hint: access the container and try to run `ip addr`.**\n", + "1. Can you fix the playbook to ensure that the `ip` fact is gathered?" ] }, { @@ -192,10 +278,11 @@ "metadata": {}, "outputs": [], "source": [ - "!ansible-playbook debug.yml --tags hostvars" + "!ansible-playbook hostvars.yml --tags hostvars" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "solution": "hidden", @@ -208,6 +295,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "solution": "hidden" @@ -217,24 +305,33 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Filters\n", "\n", - "We can process hostvars using jinja filters (see the facts and variable lesson). \n", + "We can process hostvars using jinja filters (see the [03_facts_and_variables](/notebooks/notebooks/03_facts_and_variables.ipynb) lesson). 
\n", "\n", "Here is a [list of useful filters](http://docs.ansible.com/ansible/latest/playbooks_filters.html)\n", "\n", "\n", "Remember: a filter is essentially a function returning a function, like a [python lambda](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions).\n", "\n", - "```\n", + "```python\n", "int_filter = lambda x: int(x)\n", - "```\n", + "```" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "A more complex filter: \n", - "```\n", + "\n", + "```yaml\n", "- name: This is a getter\n", " debug:\n", " msg: >\n", @@ -244,9 +341,69 @@ "```\n", "\n", "where\n", - "```\n", + "\n", + "```python\n", "hostvars_getter = lambda host: hostvars[host]['key1'][..]['keyN']\n", "```\n", + "\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Exercise\n", + "\n", + "Given the data in the cell below, identify the output of the following filter:\n", + "\n", + "```yaml\n", + "debug:\n", + " msg: >-\n", + " hosts | map('extract', hostvars, ['ipv4', 'address'])\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Given the following data, identify the output of\n", + "hosts = [\"host1\", \"host2\"]\n", + "hostvars = {\n", + " \"host1\": {\n", + " \"ipv4\": {\n", + " \"address\": \"172.17.0.1\"\n", + " }\n", + " },\n", + " \"host2\": {\n", + " \"ipv4\": {\n", + " \"address\": \"172.17.0.2\"\n", + " }\n", + " }\n", + "}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Exercise (advanced)\n", + "\n", + "Write a python function fmap associated to the above filter.\n", + "Hints:\n", + "1. `fmap` returns a function that takes a list as input;\n", + "1. `fmap` takes hostvars as a closure." 
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", "You can pipeline filters and test incrementally." ] @@ -257,10 +414,11 @@ "metadata": {}, "outputs": [], "source": [ - "!ansible-playbook debug.yml --tags filters" + "!ansible-playbook hostvars.yml --tags filters" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -268,15 +426,18 @@ "\n", "Creating user and installing packages is easy\n", "\n", - "```\n", + "```yaml\n", " - name: Install apache\n", - " apt: item=\"{{item}}\" state=present\n", + " apt:\n", + " item: \"{{item}}\" state=present\n", " with_items:\n", " - apache2\n", " - curl\n", " \n", " - name: Remove wget\n", - " apt: item=wget state=absent\n", + " apt:\n", + " item: wget \n", + " state: absent\n", "```" ] }, @@ -308,6 +469,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -339,6 +501,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -368,6 +531,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -379,6 +543,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -389,7 +554,7 @@ " - every command stdout/stderr is redirected to a given file\n", " - before and after every command output print a header and a footer (eg. 
the expected output is like\n", " \n", - "```\n", + "```text\n", "--- START COMMAND: cat /etc/resolv.conf --\n", "nameserver 172.17.0.1\n", "-- END COMMAND: cat /etc/resolv.conf\n", @@ -412,6 +577,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -429,7 +595,7 @@ "\n", "Here's a full example!\n", "\n", - "```\n", + "```yaml\n", " - name: This shell task will not modify the system\n", " shell: |\n", " pgrep -fa tomcat\n", @@ -471,6 +637,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -485,7 +652,7 @@ " \n", "*service* manages and enables services\n", "\n", - "```\n", + "```yaml\n", " - name: Restart httpd\n", " service: \n", " name: httpd\n", @@ -496,21 +663,18 @@ "*systemd* manages and enables services with systemd. It can reload systemd configuration too\n", "\n", "\n", - "```\n", - "\n", + "```yaml\n", " - name: Reload docker with new systemd config\n", " systemd:\n", " state: restarted\n", " name: docker\n", " daemon_reload: yes\n", - "\n", - "\n", "```\n", " \n", "*mount* populates entries in /etc/fstab and mounts associate filesystem.\n", "\n", "\n", - "```\n", + "```yaml\n", " - name: Check if mongod LUN is mounted\n", " register: mongod_on_storage\n", " mount:\n", @@ -518,7 +682,6 @@ " src: /dev/sdc\n", " fstype: xfs\n", " state: present # creates an entry in /etc/fstab. 
`mounted` does the actual mount.\n", - "\n", "```\n", "\n", "#### Exercise\n", diff --git a/ansible-101/notebooks/07_templates.ipynb b/ansible-101/notebooks/07_templates.ipynb index 1dfcc88..46e0c28 100644 --- a/ansible-101/notebooks/07_templates.ipynb +++ b/ansible-101/notebooks/07_templates.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -21,6 +22,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -91,6 +93,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -127,6 +130,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -140,7 +144,7 @@ "In this recap exercise, write the [add_key.yml](/edit/notebooks/exercises-07/add_key.yml) playbook which:\n", "\n", " - authenticates with `root:root` credentials on remote hosts\n", - " - generate a new ecdsa PK for local `root`\n", + " - generate a new ed25519 PK for local `root`\n", " - creates the fizz user on remote hosts with:\n", " * a keypair\n", " * a password\n", diff --git a/ansible-101/notebooks/08_yaml_pitfalls.ipynb b/ansible-101/notebooks/08_yaml_pitfalls.ipynb index e5192bb..dcb7115 100644 --- a/ansible-101/notebooks/08_yaml_pitfalls.ipynb +++ b/ansible-101/notebooks/08_yaml_pitfalls.ipynb @@ -1,27 +1,12 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Yaml\n", - "\n", - "Except for `ansible.cfg` which is an .ini file, all ansible files are in `yaml`, so let's spent some time on YAML.\n", - "\n", - "At the end of [inventories chapter](/notebooks/notebooks/05_inventories.ipynb) we showed how to use yaml to define list and maps variables.\n", - "\n", - "In this chapter we'll go a bit further.\n", - "\n", - "## Goals\n", - "\n", - " - what's yaml\n", - " - parsing and dumping yaml with python\n", - " - strings: quotes and multi-line\n", - " \n", - "## See also\n", - "\n", - " - [Yaml in 
ansible](http://docs.ansible.com/ansible/YAMLSyntax.html)\n", - " - [Yaml reference](http://www.yaml.org/spec/1.2/spec.html)" + "# YAML Pitfalls\n", + "#### ([exercise directory](/tree/notebooks/exercise-06))" ] }, { @@ -34,10 +19,35 @@ ] }, { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "Except for `ansible.cfg` which is an .ini file, all ansible files are in YAML, so let's spent some time on YAML.\n", + "\n", + "At the end of [inventories chapter](/notebooks/notebooks/05_inventories.ipynb) we showed how to use YAML to define list and maps variables.\n", + "\n", + "In this chapter we'll go a bit further.\n", + "\n", + "## Goals\n", + "\n", + "- what's YAML\n", + "- parsing and dumping yaml with python\n", + "- strings: quotes and multi-line\n", + "\n", + "## See also\n", + "\n", + "- [YAML in ansible](https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html)\n", + "- [YAML reference](http://www.yaml.org/spec/1.2/spec.html)" + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## What's yaml?" + "## What's YAML?" 
] }, { @@ -49,9 +59,9 @@ "import yaml\n", "\n", "txt = \"\"\"\n", - "{ \"yaml\": 'is', 'a superset': 'of json'}\n", + "{ \"YAML\": 'is', '~ a superset': 'of JSON'}\n", "\"\"\"\n", - "ret = yaml.load(txt)\n", + "ret = yaml.safe_load(txt)\n", "print(ret)" ] }, @@ -62,7 +72,7 @@ "outputs": [], "source": [ "# Yoda loves dictionaries ;)\n", - "print(yaml.dump(ret))" + "print(yaml.safe_dump(ret))" ] }, { @@ -72,7 +82,7 @@ "outputs": [], "source": [ "# Customized dumper\n", - "print(yaml.dump(ret, default_flow_style=False))" + "print(yaml.safe_dump(ret, default_flow_style=False))" ] }, { @@ -86,8 +96,8 @@ "you: {'can':'use', 'brace':'syntax'}\n", "\"\"\"\n", "\n", - "ret = yaml.load(txt)\n", - "print(yaml.dump(ret))" + "ret = yaml.safe_load(txt)\n", + "print(yaml.safe_dump(ret))" ] }, { @@ -96,7 +106,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(yaml.dump(ret, default_flow_style=False))" + "print(yaml.safe_dump(ret, default_flow_style=False))" ] }, { @@ -106,14 +116,14 @@ "outputs": [], "source": [ "# Yaml can describe list..\n", - "print(yaml.load(\"\"\"\n", + "print(yaml.safe_load(\"\"\"\n", "- tasks:\n", " - contains \n", " - a\n", " - list\n", " - of\n", " - modules\n", - "\"\"\"))\n" + "\"\"\"))" ] }, { @@ -123,7 +133,7 @@ "outputs": [], "source": [ "# .. 
and maps / dicts\n", - "print(yaml.load(\"\"\"\n", + "print(yaml.safe_load(\"\"\"\n", "- tasks:\n", " - name: \"this dict has two keys: name and debug\"\n", " debug: msg=\"Welcome to Rimini!\"\n", @@ -131,6 +141,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -152,7 +163,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(yaml.load(\"\"\"\n", + "print(yaml.safe_load(\"\"\"\n", "this_works: http://no-spaces-after-colon:8080\n", "\"\"\"))\n" ] @@ -163,7 +174,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(yaml.load(\"\"\"this_no: spaces: after colon\"\"\")) \n" + "print(yaml.safe_load(\"\"\"this_no: spaces: after colon\"\"\")) \n" ] }, { @@ -173,7 +184,7 @@ "outputs": [], "source": [ "# Quoting is important!\n", - "print(yaml.load(\"\"\"\n", + "print(yaml.safe_load(\"\"\"\n", "that: \"works: though\"\n", "\"\"\"))" ] @@ -185,23 +196,24 @@ "outputs": [], "source": [ "# This is fine \n", - "print(yaml.load(\"\"\"\n", + "print(yaml.safe_load(\"\"\"\n", "this_is: fine={{in_yaml}} but\n", "\"\"\"))\n", "\n", "# but with ansible you should\n", - "print(yaml.load(\"\"\"\n", - "always: quote=\"{{moustaches}}\"\n", + "print(yaml.safe_load(\"\"\"\n", + "always: quote=\"{{ moustaches }}\"\n", "\"\"\"))" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "### Long texts\n", "\n", - "Long texts are easy and clean as long as you *** use `>` and `|` instead of quoting ***." + "Long texts are easy and clean as long as you ***use `>` and `|` instead of quoting***." ] }, { @@ -231,6 +243,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -258,7 +271,7 @@ " Rimini, or the ancient Ariminum, \n", " is an art heritage city with over 22 centuries of history. 
\n", " \n", - " In 268 B.C., the Roman Senate nsent six thousand settlers \n", + " In 268 B.C., the Roman Senate sent six thousand settlers \n", " who founded the city that was meant to be strategically central \n", " and to develop to this day.\n", "\n", @@ -266,7 +279,7 @@ " Rimini, or the ancient Ariminum, \n", " is an art heritage city with over 22 centuries of history. \n", " \n", - " In 268 B.C., the Roman Senate nsent six thousand settlers \n", + " In 268 B.C., the Roman Senate sent six thousand settlers \n", " who founded the city that was meant to be strategically central \n", " and to develop to this day.\n", "\n", @@ -278,6 +291,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -298,14 +312,15 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Yaml and Ansible recap\n", + "# YAML and Ansible recap\n", "\n", - "## Yaml\n", + "## YAML\n", "\n", - " - all files are yaml but inventories (for now) and ansible.cfg\n", + " - all files are yaml but inventories (for now) and `ansible.cfg`\n", " - prefer `>` and `|` over quote hell\n", " - ALWAYS QUOTE `:`\n", "\n", @@ -323,6 +338,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/ansible-101/notebooks/diagrams.md b/ansible-101/notebooks/diagrams.md new file mode 100644 index 0000000..1f38588 --- /dev/null +++ b/ansible-101/notebooks/diagrams.md @@ -0,0 +1,108 @@ +# Docker connectors + + +```mermaid +graph LR + +subgraph docker[docker fab:fa-docker] +subgraph pad[ ] +c1[container1 fa:fa-cube] +c2[container2 fa:fa-cube] +s1((IP 1 fa:fa-network-wired)) +s2((IP 2 fa:fa-network-wired)) +end +dockerds((docker\nIP fa:fa-network-wired)) +dockerd[[Docker\nserver\n fa:fa-cogs fab:fa-docker]] +end +s1 -.-> c1 +s2 -.-> c2 + +subgraph ansible_connection +ssh-inventory[ssh module fa:fa-lock] +docker-inventory[docker module fab:fa-docker] +end + +ssh-inventory -->|ssh| s1 & s2 +docker-inventory 
-->|docker| dockerds +dockerds -->dockerd +c1 & c2 ---|<| dockerd + +``` + +# Ansible architecture + +```mermaid +graph + +vault[vault fa:fa-lock] +keys[ssh keys fa:fa-key] +subgraph ansible.cfg[ansible.cfg fa:fa-file ] +other +vault_config +auth_config +end + +subgraph inventory[inventory fa:fa-file / fa:fa-globe] +s1[prod fa:fa-server] +s2[test fa:fa-server] +s3[other... fa:fa-server] +end + +subgraph group_vars[group_vars fa:fa-folder / fa:fa-globe ] +gv1[prod] +gv2[test] +end + +ansible.cfg -->|references| inventory +s1 -.- gv1 +s2 -.- gv2 + +vault_config --> vault +auth_config --> keys + +ansible.cfg --->|references| modules & roles[roles fa:fa-folder] +``` + + +# IaC + +```mermaid +--- +title: Infrastructure as Code +--- +graph LR + +dev(("  fa:fa-user fa:fa-laptop  \ndev ")) +--> |fa:fa-code-pull-request\npull request| repo[(fa:fa-code\nrepository\nscripts\ntemplates\nhosts)] +--> engine + +engine --- |>\nOK| infrastructure +dev ---|<\nKO|engine +engine -.-o|get data| ITSM[ITSM fa:fa-server fa:fa-gears] +subgraph engine[IaC Engine fa:fa-gears] +analysis +--- validation{check} +--- deployment +end + +subgraph infrastructure[Infrastructure] +direction TB +prod["Production\nfa:fa-server fa:fa-server fa:fa-server +fa:fa-database fa:fa-database fa:fa-database +fa:fa-network-wired fa:fa-network-wired fa:fa-network-wired"] +test["Test\nfa:fa-server fa:fa-server fa:fa-server +fa:fa-database fa:fa-database fa:fa-database +fa:fa-network-wired fa:fa-network-wired fa:fa-network-wired"] +end + +linkStyle 3 stroke:red +linkStyle 2 stroke:green +``` + +# Ansible + +- a static or dynamic inventory of all the nodes to manage +- ssh keys to use +- users and secrets to connect to the hosts +- whether to do privilege escalation (eg. sudo, ...) before running tasks +- if nodes should be accessed via a bastion host, docker, ... 
diff --git a/ansible-101/notebooks/exercise-01/.pin b/ansible-101/notebooks/exercise-01/.pin deleted file mode 100644 index d97c5ea..0000000 --- a/ansible-101/notebooks/exercise-01/.pin +++ /dev/null @@ -1 +0,0 @@ -secret diff --git a/ansible-101/notebooks/exercise-01/ansible.cfg b/ansible-101/notebooks/exercise-01/ansible.cfg index 84cb2fe..d8e7bdb 100644 --- a/ansible-101/notebooks/exercise-01/ansible.cfg +++ b/ansible-101/notebooks/exercise-01/ansible.cfg @@ -10,5 +10,11 @@ host_key_checking = no private_key_file = id_ansible # We want to store root password in an encrypted file -# but don't want to be prompted for a password each time -vault_password_file = .pin +# but don't want to be prompted for a password each time. +# N.B. If this file is not present, ansible will show an error! +# vault_password_file = .pin + + +[ssh_connection] +# We will discuss this line later on. +ssh_args = -o UserKnownHostsFile=/dev/null diff --git a/ansible-101/notebooks/exercise-01/copy-key.yml b/ansible-101/notebooks/exercise-01/copy-key.yml index 13dca46..9a370f1 100644 --- a/ansible-101/notebooks/exercise-01/copy-key.yml +++ b/ansible-101/notebooks/exercise-01/copy-key.yml @@ -2,7 +2,7 @@ # Load variables from the following files - hosts: course vars_files: - - vault.yml + - vault.yaml tasks: - name: Ensure /root/.ssh is present file: @@ -18,3 +18,8 @@ owner: root group: root force: yes +# +# Bonus track: +# search on the Internet an Ansible module to +# manage the authorized_keys. +# diff --git a/ansible-101/notebooks/exercise-01/inventory.docker b/ansible-101/notebooks/exercise-01/inventory.docker new file mode 100644 index 0000000..35423e7 --- /dev/null +++ b/ansible-101/notebooks/exercise-01/inventory.docker @@ -0,0 +1,15 @@ +# +# This inventory file is used to access the course machines +# using the docker connection plugin and perform +# some management tasks. 
For example, you can change the root +# password of the machines using the following command: +# +# ansible -i inventory.docker \ +# -m shell \ +# -a "echo 'root:{{root_password}}' | chpasswd" \ +# -e@vault.yml \ +# docker +# +[docker] +ansible-101_web_[1:2] ansible_connection=docker +ansible-101_bastion_1 ansible_connection=docker diff --git a/ansible-101/notebooks/exercise-03/ansible.cfg b/ansible-101/notebooks/exercise-03/ansible.cfg index da925d1..9dd8bad 100644 --- a/ansible-101/notebooks/exercise-03/ansible.cfg +++ b/ansible-101/notebooks/exercise-03/ansible.cfg @@ -1,3 +1,6 @@ +# +# Exercise: insert comments for every parameter. +# [defaults] inventory = inventory retry_files_enabled = no diff --git a/ansible-101/notebooks/exercise-03/cartesian.yml b/ansible-101/notebooks/exercise-03/cartesian.yml index 2d679b9..4ab37f5 100644 --- a/ansible-101/notebooks/exercise-03/cartesian.yml +++ b/ansible-101/notebooks/exercise-03/cartesian.yml @@ -1,3 +1,4 @@ +--- # # Ansible can generate cartesian products # which are useful for checking @@ -5,10 +6,10 @@ # - hosts: localhost tasks: - - name: Here is a cartesian product - debug: - msg: > - {{ 'Installing ' + item[0] + "-" + item[1] }} - with_cartesian: - - ["python", "python3"] - - [ "tox", "nose", "dnspython" ] + - name: Here is a cartesian product + debug: + msg: > + {{ 'Installing ' + item[0] + "-" + item[1] }} + with_cartesian: + - [python, python3] + - [tox, nose, dnspython] diff --git a/ansible-101/notebooks/exercise-03/conditions.yml b/ansible-101/notebooks/exercise-03/conditions.yml index 271d84d..5e1bf17 100644 --- a/ansible-101/notebooks/exercise-03/conditions.yml +++ b/ansible-101/notebooks/exercise-03/conditions.yml @@ -1,24 +1,26 @@ - +--- - hosts: localhost tags: when tasks: - - debug: msg="This always happens" - when: true - - debug: msg="This never does" - when: false + - debug: msg="This always happens" + when: true + - debug: msg="This never does" + when: false - hosts: localhost tags: assert tasks: 
- - debug: msg="This is Linux!" - when: ansible_system == 'Linux' - - - fail: msg="Stop processing now!" - when: ansible_architecture == 'x86_64' + - debug: + msg: "This is Linux!" + when: ansible_system == 'Linux' + - fail: + msg: "Stop processing now!" + when: ansible_architecture == 'x86_64' - hosts: localhost tags: exercise tasks: - - debug: msg="Replace me" - when: true + - debug: + msg: "Replace me" + when: true diff --git a/ansible-101/notebooks/exercise-03/environment.yml b/ansible-101/notebooks/exercise-03/environment.yml index 92f0d78..f0de447 100644 --- a/ansible-101/notebooks/exercise-03/environment.yml +++ b/ansible-101/notebooks/exercise-03/environment.yml @@ -1,8 +1,9 @@ +--- - hosts: localhost tasks: - - name: Iterate thru environment - debug: - var: > - ansible_env.{{item}} - with_items: > - {{ansible_env | map('regex_search', '^PATH') | select('string') | list }} + - name: Iterate thru environment + debug: + var: > + ansible_env.{{ item }} + with_items: > + {{ ansible_env | map('regex_search', '^PATH') | select('string') | list }} diff --git a/ansible-101/notebooks/exercise-03/fileglob.yml b/ansible-101/notebooks/exercise-03/fileglob.yml index 874070c..0721028 100644 --- a/ansible-101/notebooks/exercise-03/fileglob.yml +++ b/ansible-101/notebooks/exercise-03/fileglob.yml @@ -1,15 +1,18 @@ +--- - hosts: web tags: fileglob tasks: - - name: You can implement a remote fileglob in 2 steps - shell: > - ls /etc/host* - register: fileglob_remote + - name: You can implement a remote fileglob in 2 steps + shell: > + ls /etc/host* + register: fileglob_remote - - name: Here we go - debug: msg="{{item}}" - with_items: > - {{fileglob_remote.stdout_lines}} + - name: Here we go + debug: + msg: >- + {{ item }} + with_items: > + {{ fileglob_remote.stdout_lines }} - hosts: web, localhost @@ -17,9 +20,10 @@ environment: WELCOME: Benvenuti a Rimini! 
tasks: - - name: Passing a shell variable to the whole play - shell: > - echo $WELCOME - register: o - - name: EuroPython is here - debug: var=o.stdout + - name: Passing a shell variable to the whole play + shell: > + echo $WELCOME + register: o + - name: EuroPython is here + debug: + var: o.stdout diff --git a/ansible-101/notebooks/exercise-03/get-facts-solution.yml b/ansible-101/notebooks/exercise-03/get-facts-solution.yml new file mode 100644 index 0000000..b89fba1 --- /dev/null +++ b/ansible-101/notebooks/exercise-03/get-facts-solution.yml @@ -0,0 +1,27 @@ +--- +# +# Use this playbook to dump ansible facts +# +- hosts: localhost + name: > + Use this section to complete the exercise, + and the next section as a reference for + the debug module. + tasks: + - name: Dump the requested facts. + debug: + msg: A string + - name: Hostname + debug: + msg: >- + {{ ansible_hostname }} + - name: First ip address + debug: + msg: >- + {{ ansible_all_ipv4_addresses[0] }} + - name: Distribution + debug: + msg: "{{ ansible_distribution }}" + - name: Kernel release + debug: + msg: "{{ ansible_kernel }}" diff --git a/ansible-101/notebooks/exercise-03/get-facts.yml b/ansible-101/notebooks/exercise-03/get-facts.yml index 24125eb..afb9dd0 100644 --- a/ansible-101/notebooks/exercise-03/get-facts.yml +++ b/ansible-101/notebooks/exercise-03/get-facts.yml @@ -1,3 +1,4 @@ +--- # # Use this playbook to dump ansible facts # @@ -7,9 +8,9 @@ and the next section as a reference for the debug module. tasks: - - name: Dump the requested facts. - debug: msg="A string" - + - name: Dump the requested facts. + debug: + msg: A string # # Read this for reference @@ -17,17 +18,21 @@ - hosts: localhost name: Those tasks are run on localhost only tasks: - - name: This is the debug module. - debug: msg="A string" + - name: This is the debug module. 
+ debug: + msg: A string - - name: The debug module can print variables too - debug: var=ansible_hostname + - name: The debug module can print variables too + debug: + var: ansible_hostname - - name: We can iterate too - debug: msg="{{item}}" - with_items: "{{ansible_system_capabilities}}" + - name: We can iterate too... + debug: + msg: "{{ item }}" + with_items: "{{ ansible_system_capabilities }}" - - name: and inspect/format variables - debug: msg="The device is {{item.device}}" - with_items: > - {{ansible_mounts}} + - name: ..and inspect/format variables + debug: + msg: The device is {{ item.device }} + with_items: > + {{ ansible_mounts }} diff --git a/ansible-101/notebooks/exercise-03/vars-and-facts-solution.yml b/ansible-101/notebooks/exercise-03/vars-and-facts-solution.yml new file mode 100644 index 0000000..ba9f4e6 --- /dev/null +++ b/ansible-101/notebooks/exercise-03/vars-and-facts-solution.yml @@ -0,0 +1,38 @@ +--- +- hosts: localhost + tags: variables + vars: + - one: 1 + - pi: 3.1415 + tasks: + - debug: + msg: > + {{ ansible_hostname }} {{ one }} + - debug: + msg: > + The output is {{ one + pi }} + + +- hosts: localhost + tags: filters + vars: + - one: 1 + - pi: 3.1415 + tasks: + - name: Ansible can process expressions using filters + debug: + msg: > + Floor it {{ ( one + pi ) | int }} + +- hosts: localhost + tags: exercise + name: Use this cell for the exercise + tasks: + - name: > + Show the free percentage of the local disk using + facts and filters. Restricted to unique devices. 
+ debug: + msg: > + {{ item.device }} {{ (100 * item.size_available / item.size_total) | int }}% + with_items: >- + {{ ansible_mounts}} diff --git a/ansible-101/notebooks/exercise-03/vars-and-facts.yml b/ansible-101/notebooks/exercise-03/vars-and-facts.yml index 289edfc..f35e902 100644 --- a/ansible-101/notebooks/exercise-03/vars-and-facts.yml +++ b/ansible-101/notebooks/exercise-03/vars-and-facts.yml @@ -1,34 +1,38 @@ --- +# +# This playbook has 3 tasklists, each with a different tag. +# - hosts: localhost tags: variables vars: - - one: 1 - - pi: 3.1415 + - one: 1 + - pi: 3.1415 tasks: - - debug: - msg: > - {{ansible_hostname }} {{ one }} - - debug: - msg: > - The output is {{ one + pi }} + - debug: + msg: > + {{ ansible_hostname }} {{ one }} + - debug: + msg: > + The output is {{ one + pi }} - hosts: localhost tags: filters vars: - - one: 1 - - pi: 3.1415 + - one: 1 + - pi: 3.1415 tasks: - - name: Ansible can process expressions using filters - debug: - msg: > - Floor it {{ ( one + pi ) | int }} + - name: Ansible can process expressions using filters + debug: + msg: > + Floor it {{ ( one + pi ) | int }} - hosts: localhost tags: exercise name: Use this cell for the exercise tasks: - - name: > - Show the free percentage of the local disk using - facts and filters - debug: msg="Replace me" + - name: > + Show the free percentage of the local disk using + facts and filters + debug: + msg: "Replace me" diff --git a/ansible-101/notebooks/exercise-05/inventory b/ansible-101/notebooks/exercise-05/inventory index 73e3cc2..0b65463 100644 --- a/ansible-101/notebooks/exercise-05/inventory +++ b/ansible-101/notebooks/exercise-05/inventory @@ -1,4 +1,4 @@ -# I can group hosts in inventory +# I can group hosts in an inventory. [web:children] web_rome @@ -8,28 +8,29 @@ web_milan web_rome_test web_rome_prod +# Fix IP addresses if they don't match. [web_rome_prod] -172.23.0.[3:4] +172.17.0.[3:4] [web_milan] 172.24.0.[5:6] -# further host variables +# Further host variables. 
[web_rome:vars] ansible_username=root -# Connect with docker +# Connect with docker. [web_rome_test:vars] ansible_connection=docker ansible_docker_extra_args="-Htcp://172.17.0.1" -# The actual host reference +# The actual host reference. [web_rome_test] ansible101_web_1 # -# Don't need to be Ssh +# Don't need to be ssh # to be my local machine ;) # [course] diff --git a/ansible-101/notebooks/exercise-05/inventory-docker-solution.py b/ansible-101/notebooks/exercise-05/inventory-docker-solution.py index f2ca03b..b7c7cdb 100755 --- a/ansible-101/notebooks/exercise-05/inventory-docker-solution.py +++ b/ansible-101/notebooks/exercise-05/inventory-docker-solution.py @@ -6,7 +6,7 @@ from collections import defaultdict # -# Manage different docker libraries +# Support different docker libraries. # try: from docker import Client @@ -19,32 +19,48 @@ logging.basicConfig(level=logging.DEBUG) -def print_hosts(): +def get_inventory_data(container): + return { + "container_name": container["Names"][0][1:], + "ip_address": container["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], + "group_name": container["Labels"].get("com.docker.compose.service"), + } + + +def create_inventory(): + # + # Create a Docker client connecting to the docker daemon port. + # c = Client(base_url="http://172.17.0.1:2375/") - container_fmt = lambda x: ( - x["Names"][0][1:], - x["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], - ) + inventory = {} - inventory = dict() + for container in c.containers(): + # Use str.format to log the container information. + host = get_inventory_data(container) + log.debug("Processing entry: {container_name}\t\t{ip_address}".format(**host)) - for x in c.containers(): - log.debug("Processing entry %r", "\t\t".join(container_fmt(x))) + # Skip parsing errors, and log a warning. 
try: - group_name = x["Labels"]["com.docker.compose.service"] - ip_address = x["NetworkSettings"]["Networks"]["bridge"]["IPAddress"] + group_name = host["group_name"] + ip_address = host["ip_address"] if group_name not in inventory: inventory[group_name] = defaultdict(list) inventory[group_name]["hosts"].append(ip_address) except KeyError: log.warning("Host not run via docker-compose: skipping") - inventory["web"]["host_vars"] = { + # + # Replace host variables for the "web" group. + # + inventory["web"]["vars"] = { "ansible_ssh_common_args": " -o StrictHostKeyChecking=no " } ret = json.dumps(inventory, indent=True) return ret - +# +# Execute the script. +# if __name__ == "__main__": - print(print_hosts()) + inventory_text = create_inventory() + print(inventory_text) diff --git a/ansible-101/notebooks/exercise-05/inventory-docker.py b/ansible-101/notebooks/exercise-05/inventory-docker.py index bdcbe5d..1455e44 100755 --- a/ansible-101/notebooks/exercise-05/inventory-docker.py +++ b/ansible-101/notebooks/exercise-05/inventory-docker.py @@ -1,27 +1,41 @@ #!/usr/bin/env python -# List our containers. Note: this only works with docker-compose containers. +# +# Exercise: Complete this inventory script. +# +# Note: this only works with docker-compose containers +# setting the "com.docker.compose.service" label. +# from __future__ import print_function import logging -from collections import defaultdict log = logging.getLogger() logging.basicConfig(level=logging.DEBUG) - +# +# Run this script in jupyter and fix one issue at a time. 
+# c = Client(base_url="http://172.17.0.1:2375/") -container_fmt = lambda x: ( - x["Names"][0][1:], - x["Labels"]["com.docker.compose.service"], - x["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], -) +def get_inventory_data(container): + return { + "container_name": container["Names"][0][1:], + "ip_address": container["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], + "group_name": container["Labels"].get("com.docker.compose.service"), + } + + +inventory = {} + +for container in c.containers(): + # Use str.format to log the container information. + host = get_inventory_data(container) + log.debug("Processing entry: {container_name}\t\t{ip_address}".format(**host)) + group_name = host["group_name"] + ip_address = host["ip_address"] -inventory = defaultdict(list) + if group_name not in inventory: + inventory[group_name]= {"hosts": []} -for x in c.containers(): - log.debug("Processing entry %r", "\t\t".join(container_fmt(x))) - group_name = x["Labels"]["com.docker.compose.service"] - ip_address = x["NetworkSettings"]["Networks"]["bridge"]["IPAddress"] - inventory[group_name].append(ip_address) + inventory[group_name]["hosts"].append(ip_address) diff --git a/ansible-101/notebooks/exercise-06/ansible.cfg b/ansible-101/notebooks/exercise-06/ansible.cfg index 8b2c0ff..b983eff 100644 --- a/ansible-101/notebooks/exercise-06/ansible.cfg +++ b/ansible-101/notebooks/exercise-06/ansible.cfg @@ -1,7 +1,6 @@ -[defaults] -inventory = inventory -host_key_checking = False -vault_password_file = pin - -[ssh_connection] -ssh_args = -F ssh_config +# +# Write here your ansible.cfg +# +# * reference a pin file +# * pass [`[ssh_connection]`](http://docs.ansible.com/ansible/intro_configuration.html#openssh-specific-settings) arguments to avoid ssh key mismatches +# * point to the local inventory diff --git a/ansible-101/notebooks/exercise-06/ansible.cfg-solution b/ansible-101/notebooks/exercise-06/ansible.cfg-solution new file mode 100644 index 0000000..8b2c0ff --- 
/dev/null +++ b/ansible-101/notebooks/exercise-06/ansible.cfg-solution @@ -0,0 +1,7 @@ +[defaults] +inventory = inventory +host_key_checking = False +vault_password_file = pin + +[ssh_connection] +ssh_args = -F ssh_config diff --git a/ansible-101/notebooks/exercise-06/inventory b/ansible-101/notebooks/exercise-06/inventory index 12bf354..e4ef255 100644 --- a/ansible-101/notebooks/exercise-06/inventory +++ b/ansible-101/notebooks/exercise-06/inventory @@ -1,2 +1,3 @@ -[course] -172.17.0.[3:9] ansible_password=root +# +# Inventory file for the 06_bastion exercise. +# diff --git a/ansible-101/notebooks/exercise-06/inventory-solution b/ansible-101/notebooks/exercise-06/inventory-solution new file mode 100644 index 0000000..12bf354 --- /dev/null +++ b/ansible-101/notebooks/exercise-06/inventory-solution @@ -0,0 +1,2 @@ +[course] +172.17.0.[3:9] ansible_password=root diff --git a/ansible-101/notebooks/exercise-06/vault.yml b/ansible-101/notebooks/exercise-06/vault.yml index e69de29..e1e7dc5 100644 --- a/ansible-101/notebooks/exercise-06/vault.yml +++ b/ansible-101/notebooks/exercise-06/vault.yml @@ -0,0 +1,3 @@ +# +# Provide credentials in this vault file. +# diff --git a/ansible-101/notebooks/exercise-07/debug.yml b/ansible-101/notebooks/exercise-07/debug.yml index 593a365..870574e 100644 --- a/ansible-101/notebooks/exercise-07/debug.yml +++ b/ansible-101/notebooks/exercise-07/debug.yml @@ -3,49 +3,20 @@ name: those tasks are run on localhost only tasks: - name: This is the debug module. - debug: msg="A string" - - - name: The debug module can print a variable too - debug: var=ansible_hostname + debug: + msg: >- + "A string" + - name: The debug module can print a variable too... 
+ debug: + var: ansible_hostname - hosts: web tags: groups - name: those tasks are run on every web host - tasks: - - name: Iterating over a list - debug: - msg: > - Host: {{item}} - with_items: > - {{ groups['web'] }} - -- hosts: web[0] - tags: hostvars - name: Run only on first host + name: Those tasks are run on every web host tasks: - - name: Iterating over a map yields the keys + - name: Iterating over the host in the "web" group. debug: msg: > - Host: {{item}} - with_items: > - {{ hostvars }} - - name: Use subscript to access entries - debug: - msg: > - address: {{ hostvars[item]['ansible_default_ipv4']['address'] }} + Host: {{ item }} with_items: > {{ groups['web'] }} - - -- hosts: web[0] - tags: filters - name: Get a list of defined host ips. - tasks: - - - name: Using filters - debug: - msg: > - {{ groups['web'] | - map('extract', hostvars, ['ansible_default_ipv4', 'address']) | - reject('undefined') | - list }} diff --git a/ansible-101/notebooks/exercise-07/hostvars.yml b/ansible-101/notebooks/exercise-07/hostvars.yml new file mode 100644 index 0000000..1ad3efc --- /dev/null +++ b/ansible-101/notebooks/exercise-07/hostvars.yml @@ -0,0 +1,47 @@ +# - hosts: web +# tasks: +# - name: >- +# Ensure iproute2 is installed on every node. +# NB: This may require specific python modules. +# apt: +# name: iproute2 +# state: present +- hosts: web + tags: debug + tasks: + - name: hostvars is a dictionary + debug: + var: hostvars + +- hosts: web + tags: hostvars + name: Last task runs only on first host + tasks: + - name: Iterating over a dictionary yields the keys + debug: + msg: > + Host: {{ item }} + with_items: > + {{ hostvars }} + - name: >- + Use subscript to access entries. + NB: This task requires iproute2 installed on the remote host. + run_once: true + debug: + msg: > + address: {{ item }} -> {{ hostvars[item]['ansible_default_ipv4']['address'] }} + with_items: > + {{ groups['web'] }} + + +- hosts: web[0] + tags: filters + name: Get a list of defined host IPs. 
+ tasks: + - name: Using filters + debug: + msg: > + {{ groups['web'] | + map('extract', hostvars, ['ansible_default_ipv4', 'address']) | + reject('undefined') | + list }} diff --git a/ansible-101/notebooks/exercise-07/inventory b/ansible-101/notebooks/exercise-07/inventory index 38c1bd6..8a992a9 100644 --- a/ansible-101/notebooks/exercise-07/inventory +++ b/ansible-101/notebooks/exercise-07/inventory @@ -2,8 +2,6 @@ ansible_connection=docker ansible_docker_extra_args="-Htcp://172.17.0.1" - - # The actual host reference [web] -ansible101_web_[1:3] +ansible-101_web_[1:2] diff --git a/ansible-101/notebooks/exercise-07/package-user.yml b/ansible-101/notebooks/exercise-07/package-user.yml index 38a6434..7b4e992 100644 --- a/ansible-101/notebooks/exercise-07/package-user.yml +++ b/ansible-101/notebooks/exercise-07/package-user.yml @@ -1,13 +1,16 @@ - hosts: web tasks: - name: Install apache and other packages eventually iterating - apt: name="{{item}}" + apt: + name: "{{item}}" with_items: - apache2 - curl - name: Remove wget - apt: name=wget state=absent + apt: + name: wget + state: absent - name: Create user user: @@ -24,7 +27,7 @@ src: package-user.yml dest: "{{ '~foo' | expanduser }}" - - name: > + - name: |- We can fetch files too (eg. logs): - files are dispatched to different directories. - we can avoid enforcing checksum as we're messing with diff --git a/ansible-101/notebooks/exercise-07/process-facts.yml b/ansible-101/notebooks/exercise-07/process-facts.yml index a458516..74998af 100644 --- a/ansible-101/notebooks/exercise-07/process-facts.yml +++ b/ansible-101/notebooks/exercise-07/process-facts.yml @@ -1,3 +1,7 @@ +# +# This playbook shows how to process facts without gathering them, +# e.g., for testing purposes. 
+# --- - hosts: localhost gather_facts: no diff --git a/ansible-101/notebooks/exercise-07/shell-output-01.yml b/ansible-101/notebooks/exercise-07/shell-output-01.yml index bb58a12..91cc241 100644 --- a/ansible-101/notebooks/exercise-07/shell-output-01.yml +++ b/ansible-101/notebooks/exercise-07/shell-output-01.yml @@ -9,9 +9,11 @@ register: o - name: Show output in a single block... - debug: var=o.stdout + debug: + var: o.stdout - name: ... or one per line - debug: var=item + debug: + var: item with_items: > {{ o.stdout_lines }} diff --git a/ansible-101/notebooks/intro.ipynb b/ansible-101/notebooks/intro.ipynb index 13065c3..ae81eee 100644 --- a/ansible-101/notebooks/intro.ipynb +++ b/ansible-101/notebooks/intro.ipynb @@ -28,14 +28,26 @@ " - [edit an existing file](/edit/notebooks/untitled.txt)\n", " - add more cells with `ALT+ENTER`\n", " \n", - "Go to the [basic python course](/tree/notebooks/rendered_notebooks/python-basic)" + "## Python terminal\n", + "\n", + "With Jupyter, you have a Python terminal at your disposal.\n", + "You can run Python code:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "a string and the number 1\n", + "now s is increased 2\n" + ] + } + ], "source": [ "# You can evaluate maths and strings\n", "s = 1\n", @@ -45,6 +57,88 @@ "print(\"now s is increased \" + str(s))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run simple, non interactive bash commands in a python cell, prefix them with `!`.\n", + "When you run a cell, the output is displayed below the cell.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "totale 576\n", + "-rw-rw-r--. 1 rpolli rpolli 16659 12 mag 17.10 00_teaser.ipynb\n", + "-rw-rw-r--. 
1 rpolli rpolli 8833 11 mag 11.38 01_architecture.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 9772 12 mag 15.25 02_delivery_layout.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 8312 15 mag 10.50 02_vaults.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 7433 15 mag 10.24 03_facts_and_variables.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 8258 6 lug 2019 04_loops_and_conditions.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 9868 22 set 2021 05_inventories.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 7306 6 lug 2019 06_bastion_and_ssh.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 14130 6 lug 2019 07_playbooks.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 5240 6 lug 2019 07_templates.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 8942 6 lug 2019 08_yaml_pitfalls.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 5892 6 lug 2019 09_roles.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 3260 6 lug 2019 10_galaxy_ok.ipynb\n", + "-rw-rw-r--. 1 rpolli rpolli 408823 22 set 2021 ansible-101.graphml\n", + "-rwxrwxr-x. 1 rpolli rpolli 263 22 set 2021 cleanup.sh\n", + "-rw-rw-r--. 1 rpolli rpolli 395 22 set 2021 custom.css\n", + "-rw-rw-r--. 1 rpolli rpolli 1718 15 mag 15.19 diagrams.md\n", + "drwxrwxr-x. 3 rpolli rpolli 142 11 mag 11.22 exercise-00\n", + "drwxrwxr-x. 2 rpolli rpolli 116 15 mag 10.48 exercise-01\n", + "drwxrwxr-x. 2 rpolli rpolli 169 11 mag 11.22 exercise-03\n", + "drwxrwxr-x. 3 rpolli rpolli 147 11 mag 11.21 exercise-05\n", + "drwxrwxr-x. 2 rpolli rpolli 129 11 mag 11.22 exercise-06\n", + "drwxrwxr-x. 2 rpolli rpolli 4096 11 mag 11.22 exercise-07\n", + "drwxrwxr-x. 3 rpolli rpolli 89 6 lug 2019 exercise-08\n", + "drwxrwxr-x. 2 rpolli rpolli 87 11 mag 11.22 exercise-09\n", + "drwxrwxr-x. 2 rpolli rpolli 67 6 lug 2019 images\n", + "-rw-rw-r--. 1 rpolli rpolli 5519 15 mag 15.31 intro.ipynb\n", + "drwxrwxr-x. 5 rpolli rpolli 59 15 mag 10.58 rendered_notebooks\n", + "-rw-rw-r--. 1 rpolli rpolli 35 22 set 2021 untitled.txt\n", + "-rw-rw-r--. 1 rpolli rpolli 163 22 set 2021 web\n" + ] + } + ], + "source": [ + "! 
ls -l" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "During the course, you will need to execute some shell code, that is rendered like the following\n", + "\n", + "```bash\n", + "# This is a bash cell.\n", + "echo \"What is your name?\"\n", + "read name\n", + "echo \"Hello $name!\"\n", + "```\n", + "\n", + "In these cases, you need to [open a terminal on the local machine](/terminals/example), for example following links like [this one](/terminals/example) and type the commands there. This is especially required for interactive tasks. \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Python\n", + "\n", + "Go to the [basic python course](/tree/notebooks/rendered_notebooks/python-basic)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -64,14 +158,9 @@ "\n", "Docker is a lightweight container environment. Jupyter and all other \"machines\" are based on docker.\n", "\n", - "[Slideshare presentation](https://www.slideshare.net/ioggstream/docker-virtualizzazione-leggera)\n", - "\n", - "[Reveal JS](http://www.piumalab.org/dockerslide)\n", - "\n", - "Some examples via a [local terminal](/terminals/example)\n", - "\n", + "[Presentation (in Italian)](https://docs.google.com/presentation/d/15swQ2gHWAKYAm_ZbBme9rmzV1CpLNl1npvgrUyODu1s/)\n", "\n", - "Go to your [docker-101course](/notebooks/notebooks/rendered_notebooks/docker-101/01-docker.ipynb)" + "Go to your [docker beginners course](/notebooks/notebooks/rendered_notebooks/docker-101/)\n" ] }, { @@ -176,7 +265,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.13" + "version": "2.7.14" } }, "nbformat": 4, diff --git a/ansible-101/site-digitalocean.yml b/ansible-101/site-digitalocean.yml index 2a2cd50..8f4a9cb 100644 --- a/ansible-101/site-digitalocean.yml +++ b/ansible-101/site-digitalocean.yml @@ -9,42 +9,67 @@ # the digital_ocean module will create those droplets over and # over again. 
# +# You can retrieve the list of provisioned hosts with +# +# $ ansible --user=root -i /tmp/inventory.do -mshell \ +# -a 'docker exec -ti ansible-101_course_1 jupyter-notebook list | sed -e "s/0.0.0.0/{{ansible_host}}/" ' do +# - hosts: localhost + gather_facts: false + vars: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + ssh_pubkeys: + - rpolli-ed25519 + - esavo-rsa + - dabbasciano-rsa + droplet_names: + - deleteme-1 + - deleteme-2 + tasks: - name: Retrieve ssh key id. - digital_ocean: - state: present - command: ssh - name: rpolli - register: do_key + community.digitalocean.digital_ocean_sshkey_info: + oauth_token: "{{ oauth_token }}" + register: ssh_keys check_mode: no + - name: Set facts based on the gathered information + set_fact: + do_key: "{{ do_key | default([]) + ssh_keys | community.general.json_query(search_query) | default(item) }}" + vars: + search_query: "data[?name=='{{ item }}'].fingerprint" + # search_query: "data[?(@.name.match('{{ ssh_pubkeys | join ('|') }}'))].fingerprint" + # + # This should be the correct query, but Ansible fatals: + # + # fatal: [localhost]: FAILED! => {"msg": "JMESPathError in json_query filter plugin:\nUnknown function: match()"} + loop: "{{ ssh_pubkeys }}" + - name: Create enough machine for the course. - digital_ocean: + community.digitalocean.digital_ocean_droplet: state: present - command: droplet name: "{{item}}" unique_name: yes - ssh_key_ids: >- - {{do_key.ssh_key.id}} - size_id: 2gb - region_id: fra1 - image_id: docker-18-04 + ssh_keys: "{{ do_key }}" + size: s-2vcpu-4gb + region: fra1 + image: docker-18-04 wait_timeout: 500 + user_data: "{{ lookup('file', 'cloudinit.txt') }}" # b64encode is not needed in Ansible register: my_droplet - with_items: - - deleteme-1 - - deleteme-2 + with_items: "{{ droplet_names }}" - name: Dynamically add the new servers to the inventory. 
add_host: - hostname: > - {{ item.droplet.networks.v4[0].ip_address }} - groups: do + hostname: >- + {{ item.data.droplet.networks.v4 | community.general.json_query(jq) | first }} + groups: + - do ansible_user: root host_key_checking: no with_items: "{{my_droplet.results}}" - + vars: + jq: "[?(@.type=='public')].ip_address" - name: Create a sample inventory file with the server ips. file: @@ -57,13 +82,17 @@ - lineinfile: path: /tmp/inventory.do line: > - {{ item.droplet.networks.v4[0].ip_address }} droplet_id={{item.droplet.id}} + {{ item.data.droplet.networks.v4 | community.general.json_query(jq) | first }} droplet_id={{item.data.droplet.id}} with_items: "{{my_droplet.results}}" + vars: + jq: "[?(@.type=='public')].ip_address" # # Configure the newly created machines # - hosts: do + tags: + - setup vars: - ansible_python_interpreter: /usr/bin/python3 - ansible_user: root @@ -72,6 +101,15 @@ tasks: - ping: - block: + - name: Automatic shutdown crontab entry + cron: + name: "shutdown" + hour: 2 # timezone is Etc/UTC + minute: 25 + state: present + disabled: false + job: "/sbin/shutdown -r now" + - name: Docker should listen on localhost. lineinfile: path: /etc/systemd/system/multi-user.target.wants/docker.service @@ -87,7 +125,6 @@ state: restarted name: docker daemon_reload: yes - when: systemd_updated.changed - name: Check socket shell: | @@ -96,6 +133,7 @@ - name: Download course git: repo: https://github.com/ioggstream/python-course.git + version: ansible-2023 dest: /root/python-course ignore_errors: true diff --git a/docker-101/01-docker.md b/docker-101/01-docker.md new file mode 100644 index 0000000..c7b6c69 --- /dev/null +++ b/docker-101/01-docker.md @@ -0,0 +1,96 @@ + +# Docker 101 + +This is a crash docker course. + +## What is? + +Docker is a platform for developers and sysadmins to develop, + ship, and run applications. + +--- + +## Why it was born? 
+
+To resolve a necessity:
+
+Workload mobility in cloud systems.
+
+- Cargo transport pre-1960
+- Solution: Intermodal Shipping Container
+
+---
+
+### Docker is a Container System for Code
+
+Docker leverages LXC (Linux Containers), which encompasses Linux
+features like cgroups and namespaces for strong process isolation and
+resource control.
+
+They are lightweight and consume fewer resources than a virtual machine.
+
+---
+
+## Benefits for developers
+
+- portability, build once... run anywhere
+- no worries about missing dependencies, packages and other pain
+  points during subsequent deployments
+- tracking changes
+- run each app in its own isolated container, so you can run various
+  versions of libraries and other dependencies for each app without
+  worrying
+- reduce/eliminate concerns about compatibility on different
+  platforms, either your own or your customers'.
+
+---
+
+## Benefits for sysadmins
+
+- portability, configure once... run anywhere
+- tracking changes
+- simple to understand what an application does
+- simpler application upgrades. Significantly improves
+  the speed and reliability of continuous deployment and continuous
+  integration systems
+- eliminate inconsistencies between different environments (development, testing, production)
+- re-use other people's images
+- you do not need a hypervisor. Because the containers are so
+  lightweight, they address significant performance, costs, deployment, and
+  portability issues normally associated with VMs
+
+---
+
+## Docker Concepts and Interactions
+
+- **Host**, the machine that is running the containers.
+- **Image**, a hierarchy of files, with meta-data for how to run a container.
+- **Container**, a contained running process, started from an image.
+- **Registry**, a repository of images.
+- **Volume**, storage outside the container.
+- **Dockerfile**, a script for creating images.
+ +---- + +[![](https://mermaid.ink/img/pako:eNptU8lu2zAQ_RWBZ8moZctVdOilKdAeekmBHirmMJLGFhMuAjkM6lr-91JbbCcBD3qjefM4G0-sNg2ygiVJwjUJklhE96Z-RhsJTWihJmG043okHCx0LddcO1-NOHrAg3Bkj-UDKkP4akd7qIo9JM2o9ci1WJec_VBwQBd8g6v2FTo-qM1ci00LtET6ymvyi_UEL7Dg7kit0ZwFVdTNTTrfjSOupwL2QmJ5gfOtM7xN7sVIr9CVv6fvTG2AoAKHgVAbTSA0WleWX1_xVSGPgSRNDTJUMXfkfbGXhIdzlVqSfOkrL2TTR7cigyfqCQ5vHdcpTaTaKCXoA4HBab3uo0vE2-hemdDsPpr78M7tCCzFjkwXWxyNWzWuh8aP1M67NgxVDMVf195fVmOiSfkxbRohi5lCq0A0YTlPXEcRZ9SiQs6KABvcg5fEGdfnQPVdmBV-awQZywqyHmMGnsyvo64Xe-LcCwiboli4ULrwtwP9xxi1kILJihP7y4p1lq-263SzyfI83WTp5zRmR1akd7vVLrvb5Vm-2W6zcM4x-zcqfFplMcMxh5_Toxrf1vk_Byk2hw?type=png)](https://mermaid.live/edit#pako:eNptU8lu2zAQ_RWBZ8moZctVdOilKdAeekmBHirmMJLGFhMuAjkM6lr-91JbbCcBD3qjefM4G0-sNg2ygiVJwjUJklhE96Z-RhsJTWihJmG043okHCx0LddcO1-NOHrAg3Bkj-UDKkP4akd7qIo9JM2o9ci1WJec_VBwQBd8g6v2FTo-qM1ci00LtET6ymvyi_UEL7Dg7kit0ZwFVdTNTTrfjSOupwL2QmJ5gfOtM7xN7sVIr9CVv6fvTG2AoAKHgVAbTSA0WleWX1_xVSGPgSRNDTJUMXfkfbGXhIdzlVqSfOkrL2TTR7cigyfqCQ5vHdcpTaTaKCXoA4HBab3uo0vE2-hemdDsPpr78M7tCCzFjkwXWxyNWzWuh8aP1M67NgxVDMVf195fVmOiSfkxbRohi5lCq0A0YTlPXEcRZ9SiQs6KABvcg5fEGdfnQPVdmBV-awQZywqyHmMGnsyvo64Xe-LcCwiboli4ULrwtwP9xxi1kILJihP7y4p1lq-263SzyfI83WTp5zRmR1akd7vVLrvb5Vm-2W6zcM4x-zcqfFplMcMxh5_Toxrf1vk_Byk2hw) + + +---- + +--- + +## Separation of concerns + +- inside the container: + - libreries + - package manager + - application + - data + - code +- outside the container: + - logging + - remote access + - network configuration + - monitoring + +--- diff --git a/docker-101/02-docker.md b/docker-101/02-docker.md new file mode 100644 index 0000000..de238a4 --- /dev/null +++ b/docker-101/02-docker.md @@ -0,0 +1,372 @@ +## Inspect the Docker host + +At first, we will inspect this machine: the operating system, the IP address and the rest. + +```python +# Distro +!cat /etc/issue +``` + +```python +# Linux version +!uname -a +``` + +```python +# IP address +! ip -4 -o address show eth0 +``` + +--- + +## Docker installation + +Check docker tools. 
+ +```python +# Docker CLI version. +!docker --version +``` + +```python +# Docker server version +!dockerd --version +``` + +--- + +## Running a container + +Docker comes with a catalog of possible operating systems and applications named Docker Registry. You can deploy your custom registry too. + +Let's pull and run an image + +[![](https://mermaid.ink/img/pako:eNptU8tu2zAQ_BWCQG6SUT_kODr00hRoD70kQA8Vc6DEtcVaXArkMq1r5d9LPRzbScADZ7XD0exyeeSVVcBznqapQNLUQM7ubbUHxzQSOFmRtugFDoSdk20tUKAP5YDZA-y0J3coHsBYgteYbWWZb2WqBq0ngXpeCP7dyB34mOtTVSjBi15t4jpQtaTTyVAGpHCKfstnecLtgWqLgkdVQHVl55v1JHAsYKsbKM5w-usEr8092yYY8MXPcZ-oSpIspYdIqCyS1AjOF8WXV3xRyFMkNbaSTaxi6sj7Ys-G-3VhLU0_d2XQjerYtUifYR3J3dvEpaWRVFljNH0g0CddwI6dT7w93Rkbm92xqQ_v0p6ko8STbRMHQ3CtJrBv_EBtg6_jpeq--Mvau_NojLSm-Zg2XqHAmxvWk5hExaL9uLOBPYuN1rh_pENs3DK5ZVHT7iGP05OMMP2jFdV5xhNuwBmpVRzwo0DGBKcaDAieR6hgK0NDggt8idTQxvuGr0qTdTwnFyDhMpB9PGB1ikfOvZZx2gyPphsfv7YSf1lrTqQY8vzI__J8nm1mq_liucw2m8UyW9wuEn7g-eJuPVtnd-tNtlmuVllcLwn_Nyh8mmUJh8HDj_FhDu_z5T_yHk5s?type=png)](https://mermaid.live/edit#pako:eNptU8tu2zAQ_BWCQG6SUT_kODr00hRoD70kQA8Vc6DEtcVaXArkMq1r5d9LPRzbScADZ7XD0exyeeSVVcBznqapQNLUQM7ubbUHxzQSOFmRtugFDoSdk20tUKAP5YDZA-y0J3coHsBYgteYbWWZb2WqBq0ngXpeCP7dyB34mOtTVSjBi15t4jpQtaTTyVAGpHCKfstnecLtgWqLgkdVQHVl55v1JHAsYKsbKM5w-usEr8092yYY8MXPcZ-oSpIspYdIqCyS1AjOF8WXV3xRyFMkNbaSTaxi6sj7Ys-G-3VhLU0_d2XQjerYtUifYR3J3dvEpaWRVFljNH0g0CddwI6dT7w93Rkbm92xqQ_v0p6ko8STbRMHQ3CtJrBv_EBtg6_jpeq--Mvau_NojLSm-Zg2XqHAmxvWk5hExaL9uLOBPYuN1rh_pENs3DK5ZVHT7iGP05OMMP2jFdV5xhNuwBmpVRzwo0DGBKcaDAieR6hgK0NDggt8idTQxvuGr0qTdTwnFyDhMpB9PGB1ikfOvZZx2gyPphsfv7YSf1lrTqQY8vzI__J8nm1mq_liucw2m8UyW9wuEn7g-eJuPVtnd-tNtlmuVllcLwn_Nyh8mmUJh8HDj_FhDu_z5T_yHk5s) + +```python +# Search images from catalog +!docker search ubuntu|head +``` + +Download an image from the remote registry. 
+ +```python +# Download the ubuntu image +!docker pull ubuntu:22.04 +``` + +Open a [terminal](/terminals/docker) and run an **interactive shell** (`--interactive`) with a terminal (`--tty`) +in a new container based on `ubuntu:22.04`. The `--rm` option removes the container when it exits. + +**NB: run the following commands in the terminal, not in the jupyter notebook** + +```bash +docker run --rm --tty --interactive ubuntu:22.04 /bin/bash +``` + +Then run the above commands to ensure that you are on another virtual hosts. + +```bash +# Operating System +cat /etc/issue +# Linux version +uname -a +``` + +--- + +#### Exercise + +What happens if you try to get the IP address using the following command? + +```bash +ip -4 -o a +``` + +Can you retrieve the IP address in another way? +**Hint: the `/proc` filesystem contains information about the system. Try to find the IP address in one of the files in `/proc`.** + +--- + +## Dockerizing Applications + +`busybox` is a lightweight Linux distribution. You can run an one-shot command in a container. + +```bash +docker run busybox /bin/echo 'Hello world' +``` + +--- + +## Docker commands + +Here is a list of docker commands. + +```text +docker create # creates a container but does not start it. +docker run # creates and starts a container. +docker stop # stops it. +docker start # will start it again. +docker restart # restarts a container. +docker rm # deletes a container. +docker kill # sends a SIGKILL to a container. +docker attach # will connect to a running container. +docker wait # blocks until container stops. +docker exec # executes a command in a running container. +``` + +You can inspect containers + +```text +docker ps # shows running containers. +docker inspect # information on a container (incl. IP address). +docker logs # gets logs from container. +docker events # gets events from container. +docker port # shows public facing port of container. +docker top # shows running processes in container. 
+docker diff     # shows changed files in container's FS.
+docker stats    # shows metrics, memory, cpu, filesystem
+```
+
+#### Exercise
+
+1. List the running containers.
+
+```python
+# Use this cell to run the correct docker command.
+```
+
+1. Inspect the `ansible-101_bastion_1` container.
+1. Use the `--format` option to get the IP address on a [terminal](/terminals/docker).
+   **Hint 1: you can google for the solution.**
+1. Try to run the `docker inspect --format ...` command above in the jupyter notebook.
+   Does it work? If not, why?
+
+```python
+# Use this cell to run the correct docker command.
+```
+
+💬 Discuss how python treats the `{` character.
+
+---
+
+## Images
+
+### Like VMs template images
+
+Docker leverages a [copy-on-write filesystem](https://docs.docker.com/storage/storagedriver/).
+This allows Docker to instantiate containers very quickly.
+Docker layers one container filesystem on top of another.
+For example, you might create a container that is based on
+a base Debian image, and then in turn create another container that is
+based on the first container.
+ +[![](https://mermaid.ink/img/pako:eNpVkstO6zAQhl_FmnVS2U7iNFmwQLBAiBVIR6I-QtPYoRaxXSW2RE_Vd8ckp7Ts5vLN7dccofNKQwvvI-530kk3xe1sk867gMbpcfNnNAG3g76EyIAHPf6VLnyGzcvOTEuAeDccztQkU3qnSW8GPZGww0Bw1MR6ZXqjVSrWTn1PvJppLL7rt7nX5sHauIyN2-hCbDlf0XJBSI9tj_ngu4_UZ4uTznl-Xdw0VVV0NSGMPt6eEfYLqRWvFWWE8OLpB6G_kJ6KAouCEEFn5P_CFxny_OZ65-8kZGD1aNGopOpROkIkJBmsltAmU-ke4xAkSHdKaNwrDPpemeBHaMMYdQYYg38-uO7sL8ydwaSRhXT3MKXoHt2r9_YMJRfaI3xCy8pyVdKqYrTkxbqhTQYHaHlTrUTViIILwfma1acM_s31dCVYxWnFxLrmJRWUZaDnhZ6W15g_5PQF27i6QA?type=png)](https://mermaid.live/edit#pako:eNpVkstO6zAQhl_FmnVS2U7iNFmwQLBAiBVIR6I-QtPYoRaxXSW2RE_Vd8ckp7Ts5vLN7dccofNKQwvvI-530kk3xe1sk867gMbpcfNnNAG3g76EyIAHPf6VLnyGzcvOTEuAeDccztQkU3qnSW8GPZGww0Bw1MR6ZXqjVSrWTn1PvJppLL7rt7nX5sHauIyN2-hCbDlf0XJBSI9tj_ngu4_UZ4uTznl-Xdw0VVV0NSGMPt6eEfYLqRWvFWWE8OLpB6G_kJ6KAouCEEFn5P_CFxny_OZ65-8kZGD1aNGopOpROkIkJBmsltAmU-ke4xAkSHdKaNwrDPpemeBHaMMYdQYYg38-uO7sL8ydwaSRhXT3MKXoHt2r9_YMJRfaI3xCy8pyVdKqYrTkxbqhTQYHaHlTrUTViIILwfma1acM_s31dCVYxWnFxLrmJRWUZaDnhZ6W15g_5PQF27i6QA) + +This mechanism allows an efficient storage usage, since multiple containers can +reuse the same filesystem layers. 
+ +[![](https://mermaid.ink/img/pako:eNqlkk2P0zAQhv_KyAdOSRU7cb4OHBY4ILQnONEg5MaTrUVsV44ttnT733GTDaQSN-LL2HnmnZnXvpDeSiQteXLidOxMZ6ZwmGPorfFCGXR0_24NgcJPp7w4jAijOKP71hn_7On-y1FNywlYM57X5KmL_48IgxpxAn8UHoRD0FaqQaGM2Wjkv2qyTU22LcX-s9TdiEqLJ_w-a-0_ah2WwcIhGB9axnZZsSAwiHYQ6Wj7H1HnICZMWbpNbhrO874CoNmnhxWhd0glWSUzCsDyxz9IdocMWZmLPAcosxl5bfjvRcCbjUOQpm_hxaGQ6c2HOL7DMCHchNe2b168bMe86ZGEaHRaKBnv_dIZgI5E5zR2pI2hxEGE0XekM9eIhpMUHj9I5a0jrXcBEyKCt5_Ppl_3C_NeiWirJtGqcYqnJ2G-WqtXKG5JeyHPpKW83hWU5Tmva5ZzVrGEnEnLmnJX8qaseZ0XBY_rmpBfs0K249nyFVXDGOc0ITi39Lg83_kVX38DC_v2ew?type=png)](https://mermaid.live/edit#pako:eNqlkk2P0zAQhv_KyAdOSRU7cb4OHBY4ILQnONEg5MaTrUVsV44ttnT733GTDaQSN-LL2HnmnZnXvpDeSiQteXLidOxMZ6ZwmGPorfFCGXR0_24NgcJPp7w4jAijOKP71hn_7On-y1FNywlYM57X5KmL_48IgxpxAn8UHoRD0FaqQaGM2Wjkv2qyTU22LcX-s9TdiEqLJ_w-a-0_ah2WwcIhGB9axnZZsSAwiHYQ6Wj7H1HnICZMWbpNbhrO874CoNmnhxWhd0glWSUzCsDyxz9IdocMWZmLPAcosxl5bfjvRcCbjUOQpm_hxaGQ6c2HOL7DMCHchNe2b168bMe86ZGEaHRaKBnv_dIZgI5E5zR2pI2hxEGE0XekM9eIhpMUHj9I5a0jrXcBEyKCt5_Ppl_3C_NeiWirJtGqcYqnJ2G-WqtXKG5JeyHPpKW83hWU5Tmva5ZzVrGEnEnLmnJX8qaseZ0XBY_rmpBfs0K249nyFVXDGOc0ITi39Lg83_kVX38DC_v2ew) + +---- + + +## Commands for interacting with images + +```text +docker images # shows all images. +docker import # creates an image from a tarball. +docker build # creates image from Dockerfile. +docker commit # creates image from a container. +docker rmi # removes an image. +docker history # list changes of an image. +``` +---- + +Images are stored in a Docker registry such as [Docker Hub](https://hub.docker.com/) +or Github's [Container Registry](). You can also host your own registry. + +`docker commit` can create an image from a container: +this is the process used when building a `Dockerfile`. + +NB. This is similar to the snapshot feature of Virtual Machines. 
+ +[![](https://mermaid.ink/img/pako:eNqtlV1vmzAUhv-K5WuIwDak4WIXXXcxbd3FVmnS4qpysEmsgImwUcu6_vc5diCQfkxre3fMeY3f8_jj3MO85gJmcN2w3QZcnVNFVV4yrS9EAVYlU1ugTVNvRaZqJYJClqWLRqr1ptbmoAo50xvWNKzLQDJWU6XblV9EVuubdtUq0y4_V1Vr2KoUwH_IEJpFxCrYWoCCZQULyzrfXlPFZSNyI2sFvn6nasW0CFHodDcl60SzXCySBOdzAOLoy_n1QRJPJHOO5jyKAUD4cpBEE0kRpZhhDEAaOYlQ_JF3JW6X38StjXvzL_rdM41Dc2eWVxupgVsH1KrsQF4rw6TSlCqzEaCquSyk4MBiE_p6sm4cekA3eb3r_gNbHDaiWIK-ktNyGrGWduO6U77HLfKxLXmYPJrO7SKiiZcffSG2LvSo9EGMQi71dvmzkd56PkzySLxvzmzW7svJfwBV6FUMnemRh9dhRE9gPKFARhTi5ymQN1Ig70CBvJICefIwPSKBRiTw8yTwG0ngdyCBJyT-eanxy7dpqEBbs_5q9AHqAzJMc28CCMMP08u9P2x7MC6DphlyzJBpBh8zeJo5GNmn_vjY-qwqaSyhvBHMCP2nf9bAYBeEM6tv9Sg5VHCa6x-KYaWn04eOAdyTFLgbFTiggTtXvtkcRaMiggmGYFJgMAHhexFVMICVaComuW1t91QBQKE9DpWgMLMhFwVrS0MhVQ9W2u7sKROfuDR1AzPTtCKArDX1j07l_dhrLiSzW11BeyZKbb_umPpV11UvskOY3cM7mKGEzNJFksYoTlCULlIcwA5mcURmhJAIz9P5GUJnCX4I4G_3h2g2X0QojhYpQvtWhlAAhbN06Tu0a9QPfwEgtsUC?type=png)](https://mermaid.live/edit#pako:eNqtlV1vmzAUhv-K5WuIwDak4WIXXXcxbd3FVmnS4qpysEmsgImwUcu6_vc5diCQfkxre3fMeY3f8_jj3MO85gJmcN2w3QZcnVNFVV4yrS9EAVYlU1ugTVNvRaZqJYJClqWLRqr1ptbmoAo50xvWNKzLQDJWU6XblV9EVuubdtUq0y4_V1Vr2KoUwH_IEJpFxCrYWoCCZQULyzrfXlPFZSNyI2sFvn6nasW0CFHodDcl60SzXCySBOdzAOLoy_n1QRJPJHOO5jyKAUD4cpBEE0kRpZhhDEAaOYlQ_JF3JW6X38StjXvzL_rdM41Dc2eWVxupgVsH1KrsQF4rw6TSlCqzEaCquSyk4MBiE_p6sm4cekA3eb3r_gNbHDaiWIK-ktNyGrGWduO6U77HLfKxLXmYPJrO7SKiiZcffSG2LvSo9EGMQi71dvmzkd56PkzySLxvzmzW7svJfwBV6FUMnemRh9dhRE9gPKFARhTi5ymQN1Ig70CBvJICefIwPSKBRiTw8yTwG0ngdyCBJyT-eanxy7dpqEBbs_5q9AHqAzJMc28CCMMP08u9P2x7MC6DphlyzJBpBh8zeJo5GNmn_vjY-qwqaSyhvBHMCP2nf9bAYBeEM6tv9Sg5VHCa6x-KYaWn04eOAdyTFLgbFTiggTtXvtkcRaMiggmGYFJgMAHhexFVMICVaComuW1t91QBQKE9DpWgMLMhFwVrS0MhVQ9W2u7sKROfuDR1AzPTtCKArDX1j07l_dhrLiSzW11BeyZKbb_umPpV11UvskOY3cM7mKGEzNJFksYoTlCULlIcwA5mcURmhJAIz9P5GUJnCX4I4G_3h2g2X0QojhYpQvtWhlAAhbN06Tu0a9QPfwEgtsUC) + +--- + +## Building images with Dockerfile + +The Dockerfile is a text file that contains all the commands a user could +call on the command line to 
assemble an image. + +``` +! cat Dockerfile +``` + +Docker will execute all the commands in the Dockerfile and create a new image. + +```python +!docker build -t apache-example . +``` + +Open the [terminal](/terminals/docker) and: + +- inspect the image +- run the image with + +```bash +docker run --rm --detach apache-example +``` + +#### Exercise + +Open the [terminal](/terminals/docker) and: + +- check if the container is running `docker ps` +- use `curl` to check if the webserver works +- stop the container with `docker stop` +- check the container status with `docker ps -a` +- remove the container with `docker rm` + +--- + +## docker-compose + +[Docker Compose](https://docs.docker.com/compose/) is a tool for defining and running complex applications with +Docker. + +With Compose, you define a multi-container application in a +single file, then spin your application up in a single command which +does everything that needs to be done to get it running. + +---- + +## docker-compose + +Using Compose is basically a three-step process: + +1. define your app's image with a `Dockerfile` +1. define the services that make up your app in +`docker-compose.yaml` so they can be run together in an isolated +environment. +1. run `docker-compose up` and Compose will start and run your + entire app. + +---- + +## docker-compose + +docker-compose.yaml + +```docker-compose +version: '2' +services: + web: + build: . + command: python app.py + ports: + - "5000:5000" + volumes: + - .:/code + links: + - redis + redis: + image: redis +``` + +Now run docker-compose up and Compose will start and run your entire app. + +Docker Compose is a basic example of Infrastructure as Code. +The infrastructure setup is defined in a file and can be versioned. +All the changes are tracked and can be reverted. +Administrators do not have to run commands on the server to setup the infrastructure. 
+ +```bash +docker-compose up -d +``` + +--- + +## docker-compose example + +``` +version: "2" +services: + web: + image: piuma/phpsysinfo + ports: + - "80:80" +``` + +---- + +## docker-compose example + +```docker-compose +version: "2" +services: + web1: + image: piuma/phpsysinfo + web2: + image: piuma/phpsysinfo + proxy: + image: tutum/haproxy + links: + - web1 + - web2 + ports: + - "80:80" +``` + +---- + +## docker-compose example + +``` +version: "2" +services: + web1: + image: piuma/phpsysinfo + web2: + image: piuma/phpsysinfo + web3: + image: piuma/phpsysinfo + proxy: + image: tutum/haproxy + links: + - web1 + - web2 + - web3 + ports: + - "80:80" +``` + +---- + +## docker-compose example + +``` +version: "2" +services: + phpmyadmin: + image: nazarpc/phpmyadmin + links: + - mysql + ports: + - "8080:80" + mysql: + image: mysql + environment: + - MYSQL_ROOT_PASSWORD=secret +``` + +--- + +## What next? + +```python +!docker run --rm mribeiro/cowsay "Any questions?" +``` + +--- diff --git a/docker-101/Dockerfile b/docker-101/Dockerfile new file mode 100644 index 0000000..8f70b36 --- /dev/null +++ b/docker-101/Dockerfile @@ -0,0 +1,22 @@ +# +# This is a simple Dockerfile to build an image with Apache2. +# You can build it with: +# +# $ docker build -t test-apache2 . +# +FROM debian:stable-slim + +# Set the maintainer. See https://docs.docker.com/engine/reference/builder/#maintainer-deprecated +LABEL org.opencontainers.image.authors="piuma@piumalab.org" + +# Install Apache2 and finally clean up the APT cache. +# There are different opinions on whether to `apt-get -y upgrade` or not. +# Check this interesting discussion: https://github.com/docker/docs/pull/12571 +RUN apt-get update && apt-get -y install apache2 && apt-get clean + +# Expose the default ports. 
https://docs.docker.com/engine/reference/builder/#expose +EXPOSE 80/tcp +EXPOSE 443/tcp + +# Set the default command to run when starting the container +CMD ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] diff --git a/docker-101/Makefile b/docker-101/Makefile new file mode 100644 index 0000000..7d2e24f --- /dev/null +++ b/docker-101/Makefile @@ -0,0 +1,11 @@ +FILES=$(wildcard *.md) +BOOKS = $(patsubst %.md,notebooks/%.ipynb,$(FILES)) + + +all: $(BOOKS) + @echo $(BOOKS) + cp Dockerfile notebooks/Dockerfile + +notebooks/%.ipynb: %.md + + notedown --to notebook $^ > $@ diff --git a/docker-101/notebooks/01-docker.ipynb b/docker-101/notebooks/01-docker.ipynb new file mode 100644 index 0000000..d2c5383 --- /dev/null +++ b/docker-101/notebooks/01-docker.ipynb @@ -0,0 +1,190 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "bd72a566", + "metadata": {}, + "source": [ + "# Docker 101\n", + "\n", + "This is a crash docker course.\n", + "\n", + "## What is?\n", + "\n", + "Docker is a platform for developers and sysadmins to develop,\n", + " ship, and run applications." + ] + }, + { + "cell_type": "markdown", + "id": "819672e8", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Why it was born?\n", + "\n", + "To resolve a necessity\n", + "\n", + "Mobility workload in cloud systems\n", + "\n", + "- Cargo transport pre-1960\n", + "- Solution: Intermodal Shipping Container" + ] + }, + { + "cell_type": "markdown", + "id": "c8a2c20e", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "### Docker is a Container System for Code\n", + "\n", + "Docker leverages LXC (Linux Containers), which encompasses Linux\n", + "features like cgroups and namespaces for strong process isolation and\n", + "resource control.\n", + "\n", + "They are lightweight and consume less resources than a virtual machine." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "69ef6e0c",
+   "metadata": {
+    "slideshow": {
+     "slide_type": "slide"
+    }
+   },
+   "source": [
+    "---\n",
+    "\n",
+    "## Benefits for developers\n",
+    "\n",
+    "- portability, build once... run anywhere\n",
+    "- no worries about missing dependencies, packages and other pain\n",
+    "  points during subsequent deployments\n",
+    "- tracking changes\n",
+    "- run each app in its own isolated container, so you can run various\n",
+    "  versions of libraries and other dependencies for each app without\n",
+    "  worrying\n",
+    "- reduce/eliminate concerns about compatibility on different\n",
+    "  platforms, either your own or your customers'."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6fbc76a7",
+   "metadata": {
+    "slideshow": {
+     "slide_type": "slide"
+    }
+   },
+   "source": [
+    "---\n",
+    "\n",
+    "## Benefits for sysadmins\n",
+    "\n",
+    "- portability, configure once... run anywhere\n",
+    "- tracking changes\n",
+    "- simple to understand what applications do\n",
+    "- simplifies upgrading applications. Significantly improves\n",
+    "  the speed and reliability of continuous deployment and continuous\n",
+    "  integration systems\n",
+    "- eliminates inconsistencies between different environments (development, testing, production)\n",
+    "- re-use other people's images\n",
+    "- you do not need a hypervisor. 
Because the containers are so\n", + " lightweight, address significant performance, costs, deployment, and\n", + " portability issues normally associated with VMs" + ] + }, + { + "cell_type": "markdown", + "id": "bfae8933", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Docker Concepts and Interactions\n", + "\n", + "- **Host**, the machine that is running the containers.\n", + "- **Image**, a hierarchy of files, with meta-data for how to run a container.\n", + "- **Container**, a contained running process, started from an image.\n", + "- **Registry**, a repository of images.\n", + "- **Volume**, storage outside the container.\n", + "- **Dockerfile**, a script for creating images." + ] + }, + { + "cell_type": "markdown", + "id": "fed50018", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "[![](https://mermaid.ink/img/pako:eNptU8lu2zAQ_RWBZ8moZctVdOilKdAeekmBHirmMJLGFhMuAjkM6lr-91JbbCcBD3qjefM4G0-sNg2ygiVJwjUJklhE96Z-RhsJTWihJmG043okHCx0LddcO1-NOHrAg3Bkj-UDKkP4akd7qIo9JM2o9ci1WJec_VBwQBd8g6v2FTo-qM1ci00LtET6ymvyi_UEL7Dg7kit0ZwFVdTNTTrfjSOupwL2QmJ5gfOtM7xN7sVIr9CVv6fvTG2AoAKHgVAbTSA0WleWX1_xVSGPgSRNDTJUMXfkfbGXhIdzlVqSfOkrL2TTR7cigyfqCQ5vHdcpTaTaKCXoA4HBab3uo0vE2-hemdDsPpr78M7tCCzFjkwXWxyNWzWuh8aP1M67NgxVDMVf195fVmOiSfkxbRohi5lCq0A0YTlPXEcRZ9SiQs6KABvcg5fEGdfnQPVdmBV-awQZywqyHmMGnsyvo64Xe-LcCwiboli4ULrwtwP9xxi1kILJihP7y4p1lq-263SzyfI83WTp5zRmR1akd7vVLrvb5Vm-2W6zcM4x-zcqfFplMcMxh5_Toxrf1vk_Byk2hw?type=png)](https://mermaid.live/edit#pako:eNptU8lu2zAQ_RWBZ8moZctVdOilKdAeekmBHirmMJLGFhMuAjkM6lr-91JbbCcBD3qjefM4G0-sNg2ygiVJwjUJklhE96Z-RhsJTWihJmG043okHCx0LddcO1-NOHrAg3Bkj-UDKkP4akd7qIo9JM2o9ci1WJec_VBwQBd8g6v2FTo-qM1ci00LtET6ymvyi_UEL7Dg7kit0ZwFVdTNTTrfjSOupwL2QmJ5gfOtM7xN7sVIr9CVv6fvTG2AoAKHgVAbTSA0WleWX1_xVSGPgSRNDTJUMXfkfbGXhIdzlVqSfOkrL2TTR7cigyfqCQ5vHdcpTaTaKCXoA4HBab3uo0vE2-hemdDsPpr78M7tCCzFjkwXWxyNWzWuh8aP1M67NgxVDMVf195fVmOiSfkxbRohi5lCq0A0YTlPXEcRZ9
SiQs6KABvcg5fEGdfnQPVdmBV-awQZywqyHmMGnsyvo64Xe-LcCwiboli4ULrwtwP9xxi1kILJihP7y4p1lq-263SzyfI83WTp5zRmR1akd7vVLrvb5Vm-2W6zcM4x-zcqfFplMcMxh5_Toxrf1vk_Byk2hw)" + ] + }, + { + "cell_type": "markdown", + "id": "cd2f36a2", + "metadata": {}, + "source": [ + "----" + ] + }, + { + "cell_type": "markdown", + "id": "30d28c29", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Separation of concerns\n", + "\n", + "- inside the container:\n", + " - libreries\n", + " - package manager\n", + " - application\n", + " - data\n", + " - code\n", + "- outside the container:\n", + " - logging\n", + " - remote access\n", + " - network configuration\n", + " - monitoring" + ] + }, + { + "cell_type": "markdown", + "id": "b1141f06", + "metadata": {}, + "source": [ + "---" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docker-101/notebooks/02-docker.ipynb b/docker-101/notebooks/02-docker.ipynb new file mode 100644 index 0000000..401fd03 --- /dev/null +++ b/docker-101/notebooks/02-docker.ipynb @@ -0,0 +1,832 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "eb5b8406", + "metadata": {}, + "source": [ + "## Inspect the Docker host\n", + "\n", + "At first, we will inspect this machine: the operating system, the IP address and the rest." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4898c92a", + "metadata": {}, + "outputs": [], + "source": [ + "# Distro\n", + "!cat /etc/issue" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba0748fc", + "metadata": {}, + "outputs": [], + "source": [ + "# Linux version\n", + "!uname -a" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3bdab09c", + "metadata": {}, + "outputs": [], + "source": [ + "# IP address\n", + "! 
ip -4 -o address show eth0" + ] + }, + { + "cell_type": "markdown", + "id": "0c69d0dd", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Docker installation\n", + "\n", + "Check docker tools." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d480e5f2", + "metadata": {}, + "outputs": [], + "source": [ + "# Docker CLI version.\n", + "!docker --version" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "269567ae", + "metadata": {}, + "outputs": [], + "source": [ + "# Docker server version\n", + "!dockerd --version" + ] + }, + { + "cell_type": "markdown", + "id": "fe7be5d8", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Running a container\n", + "\n", + "Docker comes with a catalog of possible operating systems and applications named Docker Registry. You can deploy your custom registry too.\n", + "\n", + "Let's pull and run an image\n", + "\n", + 
"[![](https://mermaid.ink/img/pako:eNptU8tu2zAQ_BWCQG6SUT_kODr00hRoD70kQA8Vc6DEtcVaXArkMq1r5d9LPRzbScADZ7XD0exyeeSVVcBznqapQNLUQM7ubbUHxzQSOFmRtugFDoSdk20tUKAP5YDZA-y0J3coHsBYgteYbWWZb2WqBq0ngXpeCP7dyB34mOtTVSjBi15t4jpQtaTTyVAGpHCKfstnecLtgWqLgkdVQHVl55v1JHAsYKsbKM5w-usEr8092yYY8MXPcZ-oSpIspYdIqCyS1AjOF8WXV3xRyFMkNbaSTaxi6sj7Ys-G-3VhLU0_d2XQjerYtUifYR3J3dvEpaWRVFljNH0g0CddwI6dT7w93Rkbm92xqQ_v0p6ko8STbRMHQ3CtJrBv_EBtg6_jpeq--Mvau_NojLSm-Zg2XqHAmxvWk5hExaL9uLOBPYuN1rh_pENs3DK5ZVHT7iGP05OMMP2jFdV5xhNuwBmpVRzwo0DGBKcaDAieR6hgK0NDggt8idTQxvuGr0qTdTwnFyDhMpB9PGB1ikfOvZZx2gyPphsfv7YSf1lrTqQY8vzI__J8nm1mq_liucw2m8UyW9wuEn7g-eJuPVtnd-tNtlmuVllcLwn_Nyh8mmUJh8HDj_FhDu_z5T_yHk5s?type=png)](https://mermaid.live/edit#pako:eNptU8tu2zAQ_BWCQG6SUT_kODr00hRoD70kQA8Vc6DEtcVaXArkMq1r5d9LPRzbScADZ7XD0exyeeSVVcBznqapQNLUQM7ubbUHxzQSOFmRtugFDoSdk20tUKAP5YDZA-y0J3coHsBYgteYbWWZb2WqBq0ngXpeCP7dyB34mOtTVSjBi15t4jpQtaTTyVAGpHCKfstnecLtgWqLgkdVQHVl55v1JHAsYKsbKM5w-usEr8092yYY8MXPcZ-oSpIspYdIqCyS1AjOF8WXV3xRyFMkNbaSTaxi6sj7Ys-G-3VhLU0_d2XQjerYtUifYR3J3dvEpaWRVFljNH0g0CddwI6dT7w93Rkbm92xqQ_v0p6ko8STbRMHQ3CtJrBv_EBtg6_jpeq--Mvau_NojLSm-Zg2XqHAmxvWk5hExaL9uLOBPYuN1rh_pENs3DK5ZVHT7iGP05OMMP2jFdV5xhNuwBmpVRzwo0DGBKcaDAieR6hgK0NDggt8idTQxvuGr0qTdTwnFyDhMpB9PGB1ikfOvZZx2gyPphsfv7YSf1lrTqQY8vzI__J8nm1mq_liucw2m8UyW9wuEn7g-eJuPVtnd-tNtlmuVllcLwn_Nyh8mmUJh8HDj_FhDu_z5T_yHk5s)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ca14dd5", + "metadata": {}, + "outputs": [], + "source": [ + "# Search images from catalog\n", + "!docker search ubuntu|head" + ] + }, + { + "cell_type": "markdown", + "id": "3de53199", + "metadata": {}, + "source": [ + "Download an image from the remote registry." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0ea32a09", + "metadata": {}, + "outputs": [], + "source": [ + "# Download the ubuntu image\n", + "!docker pull ubuntu:22.04" + ] + }, + { + "cell_type": "markdown", + "id": "6e6ab248", + "metadata": {}, + "source": [ + "Open a [terminal](/terminals/docker) and run an **interactive shell** (`--interactive`) with a terminal (`--tty`)\n", + "in a new container based on `ubuntu:22.04`. The `--rm` option removes the container when it exits.\n", + "\n", + "**NB: run the following commands in the terminal, not in the jupyter notebook**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d543fe9", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "docker run --rm --tty --interactive ubuntu:22.04 /bin/bash" + ] + }, + { + "cell_type": "markdown", + "id": "336b13a3", + "metadata": {}, + "source": [ + "Then run the above commands to ensure that you are on another virtual hosts." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ac722e8", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "# Operating System\n", + "cat /etc/issue\n", + "# Linux version\n", + "uname -a" + ] + }, + { + "cell_type": "markdown", + "id": "cf05b683", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "#### Exercise\n", + "\n", + "What happens if you try to get the IP address using the following command?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8075910d", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "ip -4 -o a" + ] + }, + { + "cell_type": "markdown", + "id": "bb09c003", + "metadata": {}, + "source": [ + "Can you retrieve the IP address in another way?\n", + "**Hint: the `/proc` filesystem contains information about the system. 
Try to find the IP address in one of the files in `/proc`.**" + ] + }, + { + "cell_type": "markdown", + "id": "38f7fd1b", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Dockerizing Applications\n", + "\n", + "`busybox` is a lightweight Linux distribution. You can run an one-shot command in a container." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc56d63a", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "docker run busybox /bin/echo 'Hello world'" + ] + }, + { + "cell_type": "markdown", + "id": "e252e3ee", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Docker commands\n", + "\n", + "Here is a list of docker commands." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12fb7df7", + "metadata": { + "attributes": { + "classes": [ + "text" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "docker create # creates a container but does not start it.\n", + "docker run # creates and starts a container.\n", + "docker stop # stops it.\n", + "docker start # will start it again.\n", + "docker restart # restarts a container.\n", + "docker rm # deletes a container.\n", + "docker kill # sends a SIGKILL to a container.\n", + "docker attach # will connect to a running container.\n", + "docker wait # blocks until container stops.\n", + "docker exec # executes a command in a running container." + ] + }, + { + "cell_type": "markdown", + "id": "419f82ed", + "metadata": {}, + "source": [ + "You can inspect containers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bdaaa415", + "metadata": { + "attributes": { + "classes": [ + "text" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "docker ps # shows running containers.\n", + "docker inspect # information on a container (incl. 
IP address).\n", + "docker logs # gets logs from container.\n", + "docker events # gets events from container.\n", + "docker port # shows public facing port of container.\n", + "docker top # shows running processes in container.\n", + "docker diff # shows changed files in container's FS.\n", + "docker stats # shows metrics, memory, cpu, filsystem" + ] + }, + { + "cell_type": "markdown", + "id": "17e88f16", + "metadata": {}, + "source": [ + "#### Exercise\n", + "\n", + "1. List the running containers." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7eefeab", + "metadata": {}, + "outputs": [], + "source": [ + "# Use this cell to run the correct docker command." + ] + }, + { + "cell_type": "markdown", + "id": "ad3bbc4d", + "metadata": {}, + "source": [ + "1. Inspect the `ansible-101_bastion_1` container.\n", + "1. Use the `--format` option to get the IP address on a [terminal](/terminals/docker).\n", + " **Hint 1: you can google for the solution.**\n", + "1. Try to run the `docker inspect --format ...` command above in the jupyter notebook.\n", + " Does it work? If not, why?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90c84b58", + "metadata": {}, + "outputs": [], + "source": [ + "# Use this cell to run the correct docker command." + ] + }, + { + "cell_type": "markdown", + "id": "d0204b9a", + "metadata": {}, + "source": [ + "💬 Discuss how python treats the `{` character." 
+ ] + }, + { + "cell_type": "markdown", + "id": "5a76021d", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Images\n", + "\n", + "### Like VMs template images\n", + "\n", + "Docker leverages a [copy-on-write filesystem](https://docs.docker.com/storage/storagedriver/).\n", + "This allows Docker to instantiate containers very quickly.\n", + "Docker use to layering one container filesytem on top of another.\n", + "For example, you might create a container that is based on\n", + "a base Debian image, and then in turn create another container that is\n", + "based on the first container.\n", + "\n", + "[![](https://mermaid.ink/img/pako:eNpVkstO6zAQhl_FmnVS2U7iNFmwQLBAiBVIR6I-QtPYoRaxXSW2RE_Vd8ckp7Ts5vLN7dccofNKQwvvI-530kk3xe1sk867gMbpcfNnNAG3g76EyIAHPf6VLnyGzcvOTEuAeDccztQkU3qnSW8GPZGww0Bw1MR6ZXqjVSrWTn1PvJppLL7rt7nX5sHauIyN2-hCbDlf0XJBSI9tj_ngu4_UZ4uTznl-Xdw0VVV0NSGMPt6eEfYLqRWvFWWE8OLpB6G_kJ6KAouCEEFn5P_CFxny_OZ65-8kZGD1aNGopOpROkIkJBmsltAmU-ke4xAkSHdKaNwrDPpemeBHaMMYdQYYg38-uO7sL8ydwaSRhXT3MKXoHt2r9_YMJRfaI3xCy8pyVdKqYrTkxbqhTQYHaHlTrUTViIILwfma1acM_s31dCVYxWnFxLrmJRWUZaDnhZ6W15g_5PQF27i6QA?type=png)](https://mermaid.live/edit#pako:eNpVkstO6zAQhl_FmnVS2U7iNFmwQLBAiBVIR6I-QtPYoRaxXSW2RE_Vd8ckp7Ts5vLN7dccofNKQwvvI-530kk3xe1sk867gMbpcfNnNAG3g76EyIAHPf6VLnyGzcvOTEuAeDccztQkU3qnSW8GPZGww0Bw1MR6ZXqjVSrWTn1PvJppLL7rt7nX5sHauIyN2-hCbDlf0XJBSI9tj_ngu4_UZ4uTznl-Xdw0VVV0NSGMPt6eEfYLqRWvFWWE8OLpB6G_kJ6KAouCEEFn5P_CFxny_OZ65-8kZGD1aNGopOpROkIkJBmsltAmU-ke4xAkSHdKaNwrDPpemeBHaMMYdQYYg38-uO7sL8ydwaSRhXT3MKXoHt2r9_YMJRfaI3xCy8pyVdKqYrTkxbqhTQYHaHlTrUTViIILwfma1acM_s31dCVYxWnFxLrmJRWUZaDnhZ6W15g_5PQF27i6QA)\n", + "\n", + "This mechanism allows an efficient storage usage, since multiple containers can\n", + "reuse the same filesystem layers.\n", + "\n", + 
"[![](https://mermaid.ink/img/pako:eNqlkk2P0zAQhv_KyAdOSRU7cb4OHBY4ILQnONEg5MaTrUVsV44ttnT733GTDaQSN-LL2HnmnZnXvpDeSiQteXLidOxMZ6ZwmGPorfFCGXR0_24NgcJPp7w4jAijOKP71hn_7On-y1FNywlYM57X5KmL_48IgxpxAn8UHoRD0FaqQaGM2Wjkv2qyTU22LcX-s9TdiEqLJ_w-a-0_ah2WwcIhGB9axnZZsSAwiHYQ6Wj7H1HnICZMWbpNbhrO874CoNmnhxWhd0glWSUzCsDyxz9IdocMWZmLPAcosxl5bfjvRcCbjUOQpm_hxaGQ6c2HOL7DMCHchNe2b168bMe86ZGEaHRaKBnv_dIZgI5E5zR2pI2hxEGE0XekM9eIhpMUHj9I5a0jrXcBEyKCt5_Ppl_3C_NeiWirJtGqcYqnJ2G-WqtXKG5JeyHPpKW83hWU5Tmva5ZzVrGEnEnLmnJX8qaseZ0XBY_rmpBfs0K249nyFVXDGOc0ITi39Lg83_kVX38DC_v2ew?type=png)](https://mermaid.live/edit#pako:eNqlkk2P0zAQhv_KyAdOSRU7cb4OHBY4ILQnONEg5MaTrUVsV44ttnT733GTDaQSN-LL2HnmnZnXvpDeSiQteXLidOxMZ6ZwmGPorfFCGXR0_24NgcJPp7w4jAijOKP71hn_7On-y1FNywlYM57X5KmL_48IgxpxAn8UHoRD0FaqQaGM2Wjkv2qyTU22LcX-s9TdiEqLJ_w-a-0_ah2WwcIhGB9axnZZsSAwiHYQ6Wj7H1HnICZMWbpNbhrO874CoNmnhxWhd0glWSUzCsDyxz9IdocMWZmLPAcosxl5bfjvRcCbjUOQpm_hxaGQ6c2HOL7DMCHchNe2b168bMe86ZGEaHRaKBnv_dIZgI5E5zR2pI2hxEGE0XekM9eIhpMUHj9I5a0jrXcBEyKCt5_Ppl_3C_NeiWirJtGqcYqnJ2G-WqtXKG5JeyHPpKW83hWU5Tmva5ZzVrGEnEnLmnJX8qaseZ0XBY_rmpBfs0K249nyFVXDGOc0ITi39Lg83_kVX38DC_v2ew)" + ] + }, + { + "cell_type": "markdown", + "id": "5a4f0b77", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "\n", + "## Commands for interacting with images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "858ca912", + "metadata": { + "attributes": { + "classes": [ + "text" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "docker images # shows all images.\n", + "docker import # creates an image from a tarball.\n", + "docker build # creates image from Dockerfile.\n", + "docker commit # creates image from a container.\n", + "docker rmi # removes an image.\n", + "docker history # list changes of an image." 
+ ] + }, + { + "cell_type": "markdown", + "id": "9883df86", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "Images are stored in a Docker registry such as [Docker Hub](https://hub.docker.com/)\n", + "or Github's [Container Registry](). You can also host your own registry.\n", + "\n", + "`docker commit` can create an image from a container:\n", + "this is the process used when building a `Dockerfile`.\n", + "\n", + "NB. This is similar to the snapshot feature of Virtual Machines.\n", + "\n", + "[![](https://mermaid.ink/img/pako:eNqtlV1vmzAUhv-K5WuIwDak4WIXXXcxbd3FVmnS4qpysEmsgImwUcu6_vc5diCQfkxre3fMeY3f8_jj3MO85gJmcN2w3QZcnVNFVV4yrS9EAVYlU1ugTVNvRaZqJYJClqWLRqr1ptbmoAo50xvWNKzLQDJWU6XblV9EVuubdtUq0y4_V1Vr2KoUwH_IEJpFxCrYWoCCZQULyzrfXlPFZSNyI2sFvn6nasW0CFHodDcl60SzXCySBOdzAOLoy_n1QRJPJHOO5jyKAUD4cpBEE0kRpZhhDEAaOYlQ_JF3JW6X38StjXvzL_rdM41Dc2eWVxupgVsH1KrsQF4rw6TSlCqzEaCquSyk4MBiE_p6sm4cekA3eb3r_gNbHDaiWIK-ktNyGrGWduO6U77HLfKxLXmYPJrO7SKiiZcffSG2LvSo9EGMQi71dvmzkd56PkzySLxvzmzW7svJfwBV6FUMnemRh9dhRE9gPKFARhTi5ymQN1Ig70CBvJICefIwPSKBRiTw8yTwG0ngdyCBJyT-eanxy7dpqEBbs_5q9AHqAzJMc28CCMMP08u9P2x7MC6DphlyzJBpBh8zeJo5GNmn_vjY-qwqaSyhvBHMCP2nf9bAYBeEM6tv9Sg5VHCa6x-KYaWn04eOAdyTFLgbFTiggTtXvtkcRaMiggmGYFJgMAHhexFVMICVaComuW1t91QBQKE9DpWgMLMhFwVrS0MhVQ9W2u7sKROfuDR1AzPTtCKArDX1j07l_dhrLiSzW11BeyZKbb_umPpV11UvskOY3cM7mKGEzNJFksYoTlCULlIcwA5mcURmhJAIz9P5GUJnCX4I4G_3h2g2X0QojhYpQvtWhlAAhbN06Tu0a9QPfwEgtsUC?type=png)](https://mermaid.live/edit#pako:eNqtlV1vmzAUhv-K5WuIwDak4WIXXXcxbd3FVmnS4qpysEmsgImwUcu6_vc5diCQfkxre3fMeY3f8_jj3MO85gJmcN2w3QZcnVNFVV4yrS9EAVYlU1ugTVNvRaZqJYJClqWLRqr1ptbmoAo50xvWNKzLQDJWU6XblV9EVuubdtUq0y4_V1Vr2KoUwH_IEJpFxCrYWoCCZQULyzrfXlPFZSNyI2sFvn6nasW0CFHodDcl60SzXCySBOdzAOLoy_n1QRJPJHOO5jyKAUD4cpBEE0kRpZhhDEAaOYlQ_JF3JW6X38StjXvzL_rdM41Dc2eWVxupgVsH1KrsQF4rw6TSlCqzEaCquSyk4MBiE_p6sm4cekA3eb3r_gNbHDaiWIK-ktNyGrGWduO6U77HLfKxLXmYPJrO7SKiiZcffSG2LvSo9EGMQi71dvmzkd56PkzySLxvzmzW7svJfwBV6FUMnemRh9dhRE9gPKFARh
Ti5ymQN1Ig70CBvJICefIwPSKBRiTw8yTwG0ngdyCBJyT-eanxy7dpqEBbs_5q9AHqAzJMc28CCMMP08u9P2x7MC6DphlyzJBpBh8zeJo5GNmn_vjY-qwqaSyhvBHMCP2nf9bAYBeEM6tv9Sg5VHCa6x-KYaWn04eOAdyTFLgbFTiggTtXvtkcRaMiggmGYFJgMAHhexFVMICVaComuW1t91QBQKE9DpWgMLMhFwVrS0MhVQ9W2u7sKROfuDR1AzPTtCKArDX1j07l_dhrLiSzW11BeyZKbb_umPpV11UvskOY3cM7mKGEzNJFksYoTlCULlIcwA5mcURmhJAIz9P5GUJnCX4I4G_3h2g2X0QojhYpQvtWhlAAhbN06Tu0a9QPfwEgtsUC)" + ] + }, + { + "cell_type": "markdown", + "id": "d2d7cb30", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## Building images with Dockerfile\n", + "\n", + "The Dockerfile is a text file that contains all the commands a user could\n", + "call on the command line to assemble an image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cafb03b5", + "metadata": {}, + "outputs": [], + "source": [ + "! cat Dockerfile" + ] + }, + { + "cell_type": "markdown", + "id": "97f1b55d", + "metadata": {}, + "source": [ + "Docker will execute all the commands in the Dockerfile and create a new image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb16a508", + "metadata": {}, + "outputs": [], + "source": [ + "!docker build -t apache-example ." 
+ ] + }, + { + "cell_type": "markdown", + "id": "90a1aff7", + "metadata": {}, + "source": [ + "Open the [terminal](/terminals/docker) and:\n", + "\n", + "- inspect the image\n", + "- run the image with" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0faf5382", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "docker run --rm --detach apache-example" + ] + }, + { + "cell_type": "markdown", + "id": "ec99d74c", + "metadata": {}, + "source": [ + "#### Exercise\n", + "\n", + "Open the [terminal](/terminals/docker) and:\n", + "\n", + "- check if the container is running `docker ps`\n", + "- use `curl` to check if the webserver works\n", + "- stop the container with `docker stop`\n", + "- check the container status with `docker ps -a`\n", + "- remove the container with `docker rm`" + ] + }, + { + "cell_type": "markdown", + "id": "beca5e0c", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## docker-compose\n", + "\n", + "[Docker Compose](https://docs.docker.com/compose/) is a tool for defining and running complex applications with\n", + "Docker.\n", + "\n", + "With Compose, you define a multi-container application in a\n", + "single file, then spin your application up in a single command which\n", + "does everything that needs to be done to get it running." + ] + }, + { + "cell_type": "markdown", + "id": "6a07e434", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "## docker-compose\n", + "\n", + "Using Compose is basically a three-step process:\n", + "\n", + "1. define your app's image with a `Dockerfile`\n", + "1. define the services that make up your app in\n", + "`docker-compose.yaml` so they can be run together in an isolated\n", + "environment.\n", + "1. run `docker-compose up` and Compose will start and run your\n", + " entire app." 
+ ] + }, + { + "cell_type": "markdown", + "id": "b1d11fd0", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "## docker-compose\n", + "\n", + "docker-compose.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "052b3886", + "metadata": { + "attributes": { + "classes": [ + "docker-compose" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "version: '2'\n", + "services:\n", + " web:\n", + " build: .\n", + " command: python app.py\n", + " ports:\n", + " - \"5000:5000\"\n", + " volumes:\n", + " - .:/code\n", + " links:\n", + " - redis\n", + " redis:\n", + " image: redis" + ] + }, + { + "cell_type": "markdown", + "id": "3a6a7da0", + "metadata": {}, + "source": [ + "Now run docker-compose up and Compose will start and run your entire app.\n", + "\n", + "Docker Compose is a basic example of Infrastructure as Code.\n", + "The infrastructure setup is defined in a file and can be versioned.\n", + "All the changes are tracked and can be reverted.\n", + "Administrators do not have to run commands on the server to setup the infrastructure." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a042f1ae", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "docker-compose up -d" + ] + }, + { + "cell_type": "markdown", + "id": "87bcbac1", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## docker-compose example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa756444", + "metadata": {}, + "outputs": [], + "source": [ + "version: \"2\"\n", + "services:\n", + " web:\n", + " image: piuma/phpsysinfo\n", + " ports:\n", + " - \"80:80\"" + ] + }, + { + "cell_type": "markdown", + "id": "b80ee452", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "## docker-compose example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e9e3bf9", + "metadata": { + "attributes": { + "classes": [ + "docker-compose" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "version: \"2\"\n", + "services:\n", + " web1:\n", + " image: piuma/phpsysinfo\n", + " web2:\n", + " image: piuma/phpsysinfo\n", + " proxy:\n", + " image: tutum/haproxy\n", + " links:\n", + " - web1\n", + " - web2\n", + " ports:\n", + " - \"80:80\"" + ] + }, + { + "cell_type": "markdown", + "id": "9787fa9a", + "metadata": { + "slideshow": { + "slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "## docker-compose example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dce3e371", + "metadata": {}, + "outputs": [], + "source": [ + "version: \"2\"\n", + "services:\n", + " web1:\n", + " image: piuma/phpsysinfo\n", + " web2:\n", + " image: piuma/phpsysinfo\n", + " web3:\n", + " image: piuma/phpsysinfo\n", + " proxy:\n", + " image: tutum/haproxy\n", + " links:\n", + " - web1\n", + " - web2\n", + " - web3\n", + " ports:\n", + " - \"80:80\"" + ] + }, + { + "cell_type": "markdown", + "id": "c14d280d", + "metadata": { + "slideshow": { + 
"slide_type": "subslide" + } + }, + "source": [ + "----\n", + "\n", + "## docker-compose example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1eefc329", + "metadata": {}, + "outputs": [], + "source": [ + "version: \"2\"\n", + "services:\n", + " phpmyadmin:\n", + " image: nazarpc/phpmyadmin\n", + " links:\n", + " - mysql\n", + " ports:\n", + " - \"8080:80\"\n", + " mysql:\n", + " image: mysql\n", + " environment:\n", + " - MYSQL_ROOT_PASSWORD=secret" + ] + }, + { + "cell_type": "markdown", + "id": "6fd5c054", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "## What next?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e118edc5", + "metadata": {}, + "outputs": [], + "source": [ + "!docker run --rm mribeiro/cowsay \"Any questions?\"" + ] + }, + { + "cell_type": "markdown", + "id": "628ca78e", + "metadata": {}, + "source": [ + "---" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docker-101/notebooks/Dockerfile b/docker-101/notebooks/Dockerfile new file mode 100644 index 0000000..8f70b36 --- /dev/null +++ b/docker-101/notebooks/Dockerfile @@ -0,0 +1,22 @@ +# +# This is a simple Dockerfile to build an image with Apache2. +# You can build it with: +# +# $ docker build -t test-apache2 . +# +FROM debian:stable-slim + +# Set the maintainer. See https://docs.docker.com/engine/reference/builder/#maintainer-deprecated +LABEL org.opencontainers.image.authors="piuma@piumalab.org" + +# Install Apache2 and finally clean up the APT cache. +# There are different opinions on whether to `apt-get -y upgrade` or not. +# Check this interesting discussion: https://github.com/docker/docs/pull/12571 +RUN apt-get update && apt-get -y install apache2 && apt-get clean + +# Expose the default ports. 
https://docs.docker.com/engine/reference/builder/#expose +EXPOSE 80/tcp +EXPOSE 443/tcp + +# Set the default command to run when starting the container +CMD ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] diff --git a/docker-101/tox.ini b/docker-101/tox.ini new file mode 100644 index 0000000..c8624a4 --- /dev/null +++ b/docker-101/tox.ini @@ -0,0 +1,18 @@ +# +# When you edit the markdown file, you need to rebuild the .ipynb file +# via tox. +# +# $ pip install tox +# $ tox +# +[tox] +envlist = py3 +skipsdist = True + +[testenv] +deps = + git+https://github.com/ioggstream/notedown +allowlist_externals = + make +commands = + make diff --git a/git-101/01-git.md b/git-101/01-git.md index dcf9783..ba77430 100644 --- a/git-101/01-git.md +++ b/git-101/01-git.md @@ -2,10 +2,10 @@ ## Agenda - - VCS intro - - tracking modifications - - using a local git repository - - remote repositories +- VCS intro +- tracking modifications +- using a local git repository +- remote repositories *Beware*: commands contain small typos. You must fix them to properly complete the course! @@ -15,10 +15,10 @@ The importance of tracking changes. VCS basics: - - initialize a local repo - - change the software - - acknowledge (commit) changes - - eventually revert changes +- initialize a local repo +- change the software +- acknowledge (commit) changes +- eventually revert changes --- @@ -28,7 +28,7 @@ Track modifications of our config files without messing with the real /etc. ``` -! mkdir -p /repo-path +!mkdir -p /repo-path !cp -v /etc/host* /etc/s* /repo-path ``` @@ -47,7 +47,6 @@ Always timestamp backup copies, don't `.ori`. Exercise: Use `date +%s` to timestamp a backup copy of `hosts`. - ``` # Use this cell for the exercise. ``` @@ -62,10 +61,8 @@ Used to maintain the Linux Kernel. Distributed approach. 
- ![Checkout and Push](https://git-scm.com/figures/18333fig0106-tn.png) - --- ## Tracing requires identification @@ -93,15 +90,16 @@ Track modifications with `git` ### Exercise - - get the previous `git config ... user.email` - - remove the `--global` flag from the previous command - - run it +- get the previous `git config ... user.email` +- remove the `--global` flag from the previous command +- run it ``` # Write here the command # and show the git config file. !cat .git/config ``` + --- Enter in the repo directory and check the status: there @@ -128,7 +126,7 @@ Now we have all `host*` files to be tracked. Add files to the index ``` -! git add hosts +!git add hosts ``` The file is now *staged* for commit. It's not archived though. @@ -140,7 +138,7 @@ The file is now *staged* for commit. It's not archived though. Save files to the local index ``` -! git commit -m "Initial snapshot of hosts" +!git commit -m "Initial snapshot of hosts" ``` ![Git areas](https://git-scm.com/images/about/index1@2x.png) @@ -168,18 +166,18 @@ and finally save them in the repo. ``` !git commit "Added localhost2 to hosts" ``` + --- ## History changes Now we have an history with two changes, containing: - - commit messages - - a commit hash +- commit messages +- a commit hash HEAD is the last commit. - ``` !git log ``` @@ -193,7 +191,7 @@ HEAD is the last commit. We can revert a change using the hash or an history log ``` -! git checkout HEAD~1 -- hosts # revert hosts to the previous commit +!git checkout HEAD~1 -- hosts # revert hosts to the previous commit ``` --- @@ -203,10 +201,11 @@ We can revert a change using the hash or an history log Now some git commands, but first create a dir. ``` -! mkdir -p /repo-path +!mkdir -p /repo-path !date >> /repo-path/file.txt !date >> /repo-path/hi.txt ``` + --- ``` @@ -217,7 +216,7 @@ Now some git commands, but first create a dir. 
### Exercise - - add `file.txt` to the index and commit +- add `file.txt` to the index and commit ``` # Use this cell for the exercise @@ -225,7 +224,6 @@ Now some git commands, but first create a dir. --- - ``` !date >> /repo-path/file.txt !git diff @@ -234,7 +232,6 @@ Now some git commands, but first create a dir. --- - ``` !git log /repo-path/file.txt # show changes ``` @@ -258,14 +255,14 @@ Now some git commands, but first create a dir. Writing codes and configuration we may want to follow different strategies and save our different attempts. - - *tag* makes an unmodifiable snapshot of the repo instead. +- *tag* makes an unmodifiable snapshot of the repo instead. ``` !git tag myconfig-v1 # create a tag !git tag -l # list tags ``` - - *branch* create a modifiable copy of the code, allowing +- *branch* create a modifiable copy of the code, allowing to save and work on different features ![Branches](https://git-scm.com/figures/18333fig0313-tn.png) @@ -341,10 +338,10 @@ And switch back ### Exercise - - Create a new branch named `antani` - - modify `new-file.txt` as you please - - open a terminal, and use `git add -p` to stage the changes. What does it do? - - commit the changes +- Create a new branch named `antani` +- modify `new-file.txt` as you please +- [open a terminal](/terminals/git), and use `git add -p` to stage the changes. What does it do? +- commit the changes ``` # Use this cell for the exercise @@ -368,23 +365,34 @@ You have to remove the changes or commit them (in another branch too) # Use this cell for the exercise. ``` - --- ## Merge -Once we have consolidated some changes (Eg. test, ...) +Once we have consolidated some changes (e.g., test, ...) we can *merge* the changes into the master branch ``` !git checkout master +``` + +Before merging, we have to check the differences + +``` !git diff work-on-my-changes +``` + +And finally merge + +``` !git merge work-on-my-changes ``` --- After a merge, if the branch is no more useful, we can remove it. 
+Note: before deleting a branch, you can double-check available +branches with `git branch -a`. ``` !git branch -d work-on-changes @@ -394,9 +402,9 @@ If there are unmerged changes, git doesn't allow deleting a branch. Exercise: - - use `git branch -d` to remove the `antani` branch - - what happens? - - replace `-d` with `-D`. Does it work now? +- use `git branch -d` to remove the `antani` branch +- what happens? +- replace `-d` with `-D`. Does it work now? ``` # use this cell for the exrcise @@ -413,16 +421,28 @@ You can stage partial changes with: !git add -p ``` - --- ## Remote repositories Remote repos may be either https, ssh or files. +``` +! mkdir -p /repo-tmp && cd /repo-tmp && pwd # use another directory ;) +``` + +Exercise: + +- what happens in the following cell? ``` -! mkdir -p /repo-tmp && cd /repo-tmp # use another directory +!pwd +``` + +Go to the correct directory now. + +``` +cd /repo-tmp ``` --- @@ -433,18 +453,23 @@ Git clone downloads a remote repo, with all its changes and history. Try with a remote https repo. ``` -! git clone https://github.com/ioggstream/python-course/ python-course -cd /repo-tmp/python-course +!git clone https://github.com/ioggstream/python-course/ python-course ``` -Show repository configuration. Remote origin. +Now enter in the repo directory ``` -! git config -l +cd /repo-tmp/python-course +``` +Show repository configuration. Which is the remote origin? + +``` +!git config -l ``` The remote repo is retrieved with all its changes and history + ``` ! du -ms .git ``` @@ -454,22 +479,28 @@ And `log` can show branches and merges. ``` !git log --graph ``` + --- ### file repo A local repo can be cloned too, and has the same features -of a remote one. It's actually a remote file:// uri. +of a remote one. It's actually a remote `file://` uri. ``` -! git clone /repo-tmp/python-course /repo-tmp/my-course +!git clone /repo-tmp/python-course /repo-tmp/my-course ``` -Show repository configuration. Remote origin. 
+Now move to the new directory ``` -! git config -l +cd /repo-tmp/my-course +``` +Show repository configuration. Which is the remote origin? + +``` +!git config -l ``` --- @@ -478,33 +509,54 @@ Show repository configuration. Remote origin. You can add new files to a repo with the above workflow: - - create a branch with `git checkout -b test-1` - - add a new file - - stage changes with `git add` - - commit with `git commit` +- create a branch with `git checkout -b test-1` +- add a new file +- stage changes with `git add` +- commit with `git commit` + +``` +# Use this cell for the exercise +``` Now that your changes are on your local repo, you can synchronize / upload them to the remote copy with: ``` -! git push origin test-1 +!git push origin test-1 ``` Remember: - - origin is the URI specified by `git config -l` - - `test-1` is the branch name where you want to upload +- origin is the URI specified by `git config -l` +- `test-1` is the branch name where you want to upload To upload changes to the remote master (default) branch, you need to - - merge the changes to your local master +- merge the changes to your local master ``` !git checkout master +``` + +Check the differences + +``` +!git diff test-1 +``` + +And finally merge + +``` !git merge test-1 ``` - - push changes to master +Exercise: + +- check the master history; +- check the difference with the last commit. 
+ + +Finally, push changes to `origin/master` ``` !git push origin master diff --git a/git-101/notebooks/01-git.ipynb b/git-101/notebooks/01-git.ipynb index f9f779e..cd265cf 100644 --- a/git-101/notebooks/01-git.ipynb +++ b/git-101/notebooks/01-git.ipynb @@ -2,16 +2,17 @@ "cells": [ { "cell_type": "markdown", + "id": "7f120a0a", "metadata": {}, "source": [ "# Git - simple repository management\n", "\n", "## Agenda\n", "\n", - " - VCS intro\n", - " - tracking modifications\n", - " - using a local git repository\n", - " - remote repositories\n", + "- VCS intro\n", + "- tracking modifications\n", + "- using a local git repository\n", + "- remote repositories\n", "\n", "*Beware*: commands contain small typos. You must fix them to properly complete the course!\n", "\n", @@ -21,16 +22,23 @@ "\n", "VCS basics:\n", "\n", - " - initialize a local repo\n", - " - change the software\n", - " - acknowledge (commit) changes\n", - " - eventually revert changes" + "- initialize a local repo\n", + "- change the software\n", + "- acknowledge (commit) changes\n", + "- eventually revert changes" ] }, { "cell_type": "markdown", - "metadata": {}, + "id": "c4538aef", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Tracking modifications\n", "\n", "Track modifications of our config files without messing\n", @@ -40,15 +48,17 @@ { "cell_type": "code", "execution_count": null, + "id": "700a7a3a", "metadata": {}, "outputs": [], "source": [ - "! mkdir -p /repo-path\n", + "!mkdir -p /repo-path\n", "!cp -v /etc/host* /etc/s* /repo-path" ] }, { "cell_type": "markdown", + "id": "9d560d87", "metadata": {}, "source": [ "All operations are local to /repo-path" @@ -57,6 +67,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4e7bc3b2", "metadata": {}, "outputs": [], "source": [ @@ -65,6 +76,7 @@ }, { "cell_type": "markdown", + "id": "99c3bcd9", "metadata": {}, "source": [ "Always timestamp backup copies, don't `.ori`." 
@@ -73,6 +85,7 @@ { "cell_type": "code", "execution_count": null, + "id": "cd27bc4d", "metadata": {}, "outputs": [], "source": [ @@ -82,6 +95,7 @@ }, { "cell_type": "markdown", + "id": "d6cd9236", "metadata": {}, "source": [ "Exercise: Use `date +%s` to timestamp a backup copy of `hosts`." @@ -90,6 +104,7 @@ { "cell_type": "code", "execution_count": null, + "id": "000235a9", "metadata": {}, "outputs": [], "source": [ @@ -98,8 +113,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "01f6ef62", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Git\n", "\n", "A better way of tracking changes.\n", @@ -108,23 +130,30 @@ "\n", "Distributed approach.\n", "\n", - "\n", "![Checkout and Push](https://git-scm.com/figures/18333fig0106-tn.png)" ] }, { "cell_type": "markdown", - "metadata": {}, + "id": "4786f164", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Tracing requires identification\n", "\n", - "Declare who's modifying files. This will be inserted in \n", + "Declare who's modifying files. This will be inserted in\n", "the commit." ] }, { "cell_type": "code", "execution_count": null, + "id": "7319e718", "metadata": {}, "outputs": [], "source": [ @@ -134,6 +163,7 @@ }, { "cell_type": "markdown", + "id": "8d9e2062", "metadata": {}, "source": [ "Note: authentication can not be enforced on a local repository." @@ -141,8 +171,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "6051c86d", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Create a repository\n", "\n", "Track modifications with `git`" @@ -151,6 +188,7 @@ { "cell_type": "code", "execution_count": null, + "id": "fdf04eb4", "metadata": {}, "outputs": [], "source": [ @@ -160,18 +198,20 @@ }, { "cell_type": "markdown", + "id": "b8c78e39", "metadata": {}, "source": [ "### Exercise\n", "\n", - " - get the previous `git config ... 
user.email` \n", - " - remove the `--global` flag from the previous command\n", - " - run it" + "- get the previous `git config ... user.email`\n", + "- remove the `--global` flag from the previous command\n", + "- run it" ] }, { "cell_type": "code", "execution_count": null, + "id": "69557bf0", "metadata": {}, "outputs": [], "source": [ @@ -182,7 +222,12 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "a981eecc", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ "---\n", "\n", @@ -193,14 +238,16 @@ { "cell_type": "code", "execution_count": null, + "id": "29a73ba6", "metadata": {}, "outputs": [], "source": [ - "!git status " + "!git status" ] }, { "cell_type": "markdown", + "id": "d9e41818", "metadata": {}, "source": [ "`.gitignore` lists the files we're not interested in" @@ -209,16 +256,18 @@ { "cell_type": "code", "execution_count": null, + "id": "7e24aaf6", "metadata": {}, "outputs": [], "source": [ "# Ignore all files not starting with h\n", "!echo \"[^h]*\" >> .gitignore\n", - "!git status " + "!git status" ] }, { "cell_type": "markdown", + "id": "c0463059", "metadata": {}, "source": [ "Now we have all `host*` files to be tracked." @@ -226,8 +275,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "93276090", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Populate the repo\n", "\n", "Add files to the index" @@ -236,14 +292,16 @@ { "cell_type": "code", "execution_count": null, + "id": "ba250a13", "metadata": {}, "outputs": [], "source": [ - "! git add hosts" + "!git add hosts" ] }, { "cell_type": "markdown", + "id": "7fdde7cd", "metadata": {}, "source": [ "The file is now *staged* for commit. It's not archived though." 
@@ -252,6 +310,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4a45be06", "metadata": {}, "outputs": [], "source": [ @@ -260,6 +319,7 @@ }, { "cell_type": "markdown", + "id": "ced6d9a3", "metadata": {}, "source": [ "Save files to the local index" @@ -268,14 +328,16 @@ { "cell_type": "code", "execution_count": null, + "id": "2246b5da", "metadata": {}, "outputs": [], "source": [ - "! git commit -m \"Initial snapshot of hosts\"" + "!git commit -m \"Initial snapshot of hosts\"" ] }, { "cell_type": "markdown", + "id": "e40f0406", "metadata": {}, "source": [ "![Git areas](https://git-scm.com/images/about/index1@2x.png)" @@ -283,8 +345,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "5c47fc45", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Basic workflow\n", "\n", "Adding a line to the file we discover that" @@ -293,15 +362,17 @@ { "cell_type": "code", "execution_count": null, + "id": "61a489ee", "metadata": {}, "outputs": [], "source": [ - "!echo \"127.0.0.2 localhost2.localdomain\" >> hosts \n", + "!echo \"127.0.0.2 localhost2.localdomain\" >> hosts\n", "!git diff hosts" ] }, { "cell_type": "markdown", + "id": "7b3d7ccc", "metadata": {}, "source": [ "If we like the changes, we can stage them" @@ -310,15 +381,17 @@ { "cell_type": "code", "execution_count": null, + "id": "89f6d0d0", "metadata": {}, "outputs": [], "source": [ "!git add hosts\n", - "!git status " + "!git status" ] }, { "cell_type": "markdown", + "id": "4f57b9e5", "metadata": {}, "source": [ "and finally save them in the repo." 
@@ -327,6 +400,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0b48ff38", "metadata": {}, "outputs": [], "source": [ @@ -335,7 +409,12 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "75973a82", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ "---\n", "\n", @@ -343,8 +422,8 @@ "\n", "Now we have an history with two changes, containing:\n", "\n", - " - commit messages\n", - " - a commit hash\n", + "- commit messages\n", + "- a commit hash\n", "\n", "HEAD is the last commit." ] @@ -352,14 +431,16 @@ { "cell_type": "code", "execution_count": null, + "id": "b1a64497", "metadata": {}, "outputs": [], "source": [ - "!git log " + "!git log" ] }, { "cell_type": "markdown", + "id": "42aad7f8", "metadata": {}, "source": [ "![Basic branch](https://git-scm.com/figures/18333fig0310-tn.png)" @@ -367,8 +448,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "f45574e8", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Reverting changes\n", "\n", "We can revert a change using the hash or an history log" @@ -377,16 +465,24 @@ { "cell_type": "code", "execution_count": null, + "id": "ec0651ef", "metadata": {}, "outputs": [], "source": [ - "! git checkout HEAD~1 -- hosts # revert hosts to the previous commit" + "!git checkout HEAD~1 -- hosts # revert hosts to the previous commit" ] }, { "cell_type": "markdown", - "metadata": {}, + "id": "96eba2fc", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Cheatsheet\n", "\n", "Now some git commands, but first create a dir." @@ -395,16 +491,18 @@ { "cell_type": "code", "execution_count": null, + "id": "c7f5263e", "metadata": {}, "outputs": [], "source": [ - "! 
mkdir -p /repo-path\n", + "!mkdir -p /repo-path\n", "!date >> /repo-path/file.txt\n", "!date >> /repo-path/hi.txt" ] }, { "cell_type": "markdown", + "id": "1b486c01", "metadata": {}, "source": [ "---" @@ -413,6 +511,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d625983f", "metadata": {}, "outputs": [], "source": [ @@ -423,25 +522,36 @@ }, { "cell_type": "markdown", + "id": "12c36cb1", "metadata": {}, "source": [ "### Exercise\n", "\n", - " - add `file.txt` to the index and commit" + "- add `file.txt` to the index and commit" ] }, { "cell_type": "code", "execution_count": null, + "id": "f2875a20", "metadata": {}, "outputs": [], "source": [ "# Use this cell for the exercise" ] }, + { + "cell_type": "markdown", + "id": "176dabc2", + "metadata": {}, + "source": [ + "---" + ] + }, { "cell_type": "code", "execution_count": null, + "id": "2786502f", "metadata": {}, "outputs": [], "source": [ @@ -450,9 +560,18 @@ "!git commit -a -m \"Save all previously added files\"" ] }, + { + "cell_type": "markdown", + "id": "5e605932", + "metadata": {}, + "source": [ + "---" + ] + }, { "cell_type": "code", "execution_count": null, + "id": "f3aa8b5d", "metadata": {}, "outputs": [], "source": [ @@ -462,6 +581,7 @@ { "cell_type": "code", "execution_count": null, + "id": "809cb347", "metadata": {}, "outputs": [], "source": [ @@ -471,6 +591,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5f408bf2", "metadata": {}, "outputs": [], "source": [ @@ -480,6 +601,7 @@ { "cell_type": "code", "execution_count": null, + "id": "51681f21", "metadata": {}, "outputs": [], "source": [ @@ -488,19 +610,27 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "ff306b1d", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ - "## Tags & Branches \n", + "---\n", + "\n", + "## Tags & Branches\n", "\n", "Writing codes and configuration we may want to follow\n", "different strategies and save our different attempts.\n", "\n", - " - *tag* makes an 
unmodifiable snapshot of the repo instead." + "- *tag* makes an unmodifiable snapshot of the repo instead." ] }, { "cell_type": "code", "execution_count": null, + "id": "effd478b", "metadata": {}, "outputs": [], "source": [ @@ -510,9 +640,10 @@ }, { "cell_type": "markdown", + "id": "4b05fced", "metadata": {}, "source": [ - "- *branch* create a modifiable copy of the code, allowing \n", + "- *branch* create a modifiable copy of the code, allowing\n", " to save and work on different features\n", "\n", "![Branches](https://git-scm.com/figures/18333fig0313-tn.png)" @@ -520,8 +651,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "65f48039", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Branches\n", "\n", "`master` is the default branch" @@ -530,6 +668,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d0f52580", "metadata": {}, "outputs": [], "source": [ @@ -538,6 +677,7 @@ }, { "cell_type": "markdown", + "id": "440c9e9d", "metadata": {}, "source": [ "Create a branch" @@ -546,6 +686,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1cbdaefc", "metadata": {}, "outputs": [], "source": [ @@ -554,6 +695,7 @@ }, { "cell_type": "markdown", + "id": "15d4499e", "metadata": {}, "source": [ "And list the branches, check the active one!" 
@@ -562,6 +704,7 @@ { "cell_type": "code", "execution_count": null, + "id": "15b4fb9e", "metadata": {}, "outputs": [], "source": [ @@ -570,14 +713,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "7a767353", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "Modify a file in a branch" ] }, { "cell_type": "code", "execution_count": null, + "id": "a4631b97", "metadata": {}, "outputs": [], "source": [ @@ -587,6 +738,7 @@ }, { "cell_type": "markdown", + "id": "38365710", "metadata": {}, "source": [ "With commit we consolidate the new file in the branch" @@ -595,6 +747,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0f8e151b", "metadata": {}, "outputs": [], "source": [ @@ -603,14 +756,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "284b07da", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "Compare branches" ] }, { "cell_type": "code", "execution_count": null, + "id": "64d828f6", "metadata": {}, "outputs": [], "source": [ @@ -619,6 +780,7 @@ }, { "cell_type": "markdown", + "id": "af639d85", "metadata": {}, "source": [ "Diff supports some parameters" @@ -627,6 +789,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ce494335", "metadata": {}, "outputs": [], "source": [ @@ -635,14 +798,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "ff806888", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "We can now switch between branches" ] }, { "cell_type": "code", "execution_count": null, + "id": "f9fcf98f", "metadata": {}, "outputs": [], "source": [ @@ -652,6 +823,7 @@ }, { "cell_type": "markdown", + "id": "8eceeeb6", "metadata": {}, "source": [ "And switch back" @@ -660,6 +832,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4a90452a", "metadata": {}, "outputs": [], "source": [ @@ -669,19 +842,27 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": 
"60f1a8f7", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "### Exercise\n", "\n", - " - Create a new branch named `antani`\n", - " - modify `new-file.txt` as you please\n", - " - open a terminal, and use `git add -p` to stage the changes. What does it do?\n", - " - commit the changes" + "- Create a new branch named `antani`\n", + "- modify `new-file.txt` as you please\n", + "- [open a terminal](/terminals/git), and use `git add -p` to stage the changes. What does it do?\n", + "- commit the changes" ] }, { "cell_type": "code", "execution_count": null, + "id": "e2c7bd22", "metadata": {}, "outputs": [], "source": [ @@ -690,8 +871,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "1cd00765", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Checkout troubleshooting\n", "\n", "If you change a file, git won't make you checkout\n", @@ -701,6 +889,7 @@ { "cell_type": "code", "execution_count": null, + "id": "59e5a254", "metadata": {}, "outputs": [], "source": [ @@ -710,6 +899,7 @@ }, { "cell_type": "markdown", + "id": "71da518c", "metadata": {}, "source": [ "You have to remove the changes or commit them (in another branch too)" @@ -718,6 +908,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e22fd009", "metadata": {}, "outputs": [], "source": [ @@ -726,35 +917,87 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "0b77d8f6", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Merge\n", "\n", - "Once we have consolidated some changes (Eg. 
test, ...)\n", + "Once we have consolidated some changes (e.g., test, ...)\n", "we can *merge* the changes into the master branch" ] }, { "cell_type": "code", "execution_count": null, + "id": "770704f3", "metadata": {}, "outputs": [], "source": [ - "!git checkout master\n", - "!git diff work-on-my-changes\n", - "!git merge work-on-my-changes" + "!git checkout master" + ] + }, + { + "cell_type": "markdown", + "id": "92138e82", + "metadata": {}, + "source": [ + "Before merging, we have to check the differences" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f91d8d41", + "metadata": {}, + "outputs": [], + "source": [ + "!git diff work-on-my-changes" ] }, { "cell_type": "markdown", + "id": "c3610538", "metadata": {}, "source": [ - "After a merge, if the branch is no more useful, we can remove it." + "And finally merge" ] }, { "cell_type": "code", "execution_count": null, + "id": "e64c822b", + "metadata": {}, + "outputs": [], + "source": [ + "!git merge work-on-my-changes" + ] + }, + { + "cell_type": "markdown", + "id": "a31db651", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", + "After a merge, if the branch is no more useful, we can remove it.\n", + "Note: before deleting a branch, you can double-check available\n", + "branches with `git branch -a`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71b22af5", "metadata": {}, "outputs": [], "source": [ @@ -763,20 +1006,22 @@ }, { "cell_type": "markdown", + "id": "3216d35a", "metadata": {}, "source": [ "If there are unmerged changes, git doesn't allow deleting a branch.\n", "\n", "Exercise:\n", "\n", - " - use `git branch -d` to remove the `antani` branch\n", - " - what happens?\n", - " - replace `-d` with `-D`. Does it work now?" + "- use `git branch -d` to remove the `antani` branch\n", + "- what happens?\n", + "- replace `-d` with `-D`. Does it work now?" 
] }, { "cell_type": "code", "execution_count": null, + "id": "762915dd", "metadata": {}, "outputs": [], "source": [ @@ -785,8 +1030,15 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "869b3fda", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Selective adding\n", "\n", "You can stage partial changes with:" @@ -795,16 +1047,24 @@ { "cell_type": "code", "execution_count": null, + "id": "e49ff846", "metadata": {}, "outputs": [], "source": [ - "!git add -p " + "!git add -p" ] }, { "cell_type": "markdown", - "metadata": {}, + "id": "1ffb0b4b", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ + "---\n", + "\n", "## Remote repositories\n", "\n", "Remote repos may be either https, ssh or files." @@ -813,16 +1073,62 @@ { "cell_type": "code", "execution_count": null, + "id": "e4db9adc", + "metadata": {}, + "outputs": [], + "source": [ + "! mkdir -p /repo-tmp && cd /repo-tmp && pwd # use another directory ;)" + ] + }, + { + "cell_type": "markdown", + "id": "3f42676a", + "metadata": {}, + "source": [ + "Exercise:\n", + "\n", + "- what happens in the following cell?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aaa4aac3", "metadata": {}, "outputs": [], "source": [ - "! mkdir -p /repo-tmp && cd /repo-tmp # use another directory" + "!pwd" ] }, { "cell_type": "markdown", + "id": "5558113b", "metadata": {}, "source": [ + "Go to the correct directory now." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c80c152", + "metadata": {}, + "outputs": [], + "source": [ + "cd /repo-tmp" + ] + }, + { + "cell_type": "markdown", + "id": "00d2f691", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", "### https repo\n", "\n", "Git clone downloads a remote repo, with all its changes and history.\n", @@ -832,31 +1138,52 @@ { "cell_type": "code", "execution_count": null, + "id": "93194032", + "metadata": {}, + "outputs": [], + "source": [ + "!git clone https://github.com/ioggstream/python-course/ python-course" + ] + }, + { + "cell_type": "markdown", + "id": "7e364def", + "metadata": {}, + "source": [ + "Now enter in the repo directory" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "908d6e35", "metadata": {}, "outputs": [], "source": [ - "! git clone https://github.com/ioggstream/python-course/ python-course\n", "cd /repo-tmp/python-course" ] }, { "cell_type": "markdown", + "id": "f67f6120", "metadata": {}, "source": [ - "Show repository configuration. Remote origin." + "Show repository configuration. Which is the remote origin?" ] }, { "cell_type": "code", "execution_count": null, + "id": "9d125c4b", "metadata": {}, "outputs": [], "source": [ - "! git config -l \n" + "!git config -l" ] }, { "cell_type": "markdown", + "id": "74dccdd3", "metadata": {}, "source": [ "The remote repo is retrieved with all its changes and history" @@ -865,6 +1192,7 @@ { "cell_type": "code", "execution_count": null, + "id": "72d8a61c", "metadata": {}, "outputs": [], "source": [ @@ -873,6 +1201,7 @@ }, { "cell_type": "markdown", + "id": "ce9dc27b", "metadata": {}, "source": [ "And `log` can show branches and merges." 
@@ -881,6 +1210,7 @@ { "cell_type": "code", "execution_count": null, + "id": "99152bbc", "metadata": {}, "outputs": [], "source": [ @@ -889,54 +1219,103 @@ }, { "cell_type": "markdown", - "metadata": {}, + "id": "da3f94c6", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, "source": [ "---\n", "\n", "### file repo\n", "\n", "A local repo can be cloned too, and has the same features\n", - "of a remote one. It's actually a remote file:// uri." + "of a remote one. It's actually a remote `file://` uri." ] }, { "cell_type": "code", "execution_count": null, + "id": "172e28c4", "metadata": {}, "outputs": [], "source": [ - "! git clone /repo-tmp/python-course /repo-tmp/my-course" + "!git clone /repo-tmp/python-course /repo-tmp/my-course" ] }, { "cell_type": "markdown", + "id": "5142954c", "metadata": {}, "source": [ - "Show repository configuration. Remote origin." + "Now move to the new directory" ] }, { "cell_type": "code", "execution_count": null, + "id": "024280c5", "metadata": {}, "outputs": [], "source": [ - "! git config -l\n" + "cd /repo-tmp/my-course" ] }, { "cell_type": "markdown", + "id": "99ee299e", "metadata": {}, "source": [ + "Show repository configuration. Which is the remote origin?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "505037a8", + "metadata": {}, + "outputs": [], + "source": [ + "!git config -l" + ] + }, + { + "cell_type": "markdown", + "id": "22ce0715", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "---\n", + "\n", "## Pull & push\n", "\n", "You can add new files to a repo with the above workflow:\n", "\n", - " - create a branch with `git checkout -b test-1`\n", - " - add a new file\n", - " - stage changes with `git add`\n", - " - commit with `git commit`\n", - "\n", + "- create a branch with `git checkout -b test-1`\n", + "- add a new file\n", + "- stage changes with `git add`\n", + "- commit with `git commit`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42505c18", + "metadata": {}, + "outputs": [], + "source": [ + "# Use this cell for the exercise" + ] + }, + { + "cell_type": "markdown", + "id": "73ce1851", + "metadata": {}, + "source": [ "Now that your changes are on your local repo, you can synchronize / upload them to the remote copy\n", "with:" ] @@ -944,46 +1323,92 @@ { "cell_type": "code", "execution_count": null, + "id": "420e52e1", "metadata": {}, "outputs": [], "source": [ - "! 
git push origin test-1" + "!git push origin test-1" ] }, { "cell_type": "markdown", + "id": "af356259", "metadata": {}, "source": [ "Remember:\n", "\n", - " - origin is the URI specified by `git config -l`\n", - " - `test-1` is the branch name where you want to upload\n", + "- origin is the URI specified by `git config -l`\n", + "- `test-1` is the branch name where you want to upload\n", "\n", "To upload changes to the remote master (default) branch, you need to\n", "\n", - " - merge the changes to your local master" + "- merge the changes to your local master" ] }, { "cell_type": "code", "execution_count": null, + "id": "f1cc68b6", + "metadata": {}, + "outputs": [], + "source": [ + "!git checkout master" + ] + }, + { + "cell_type": "markdown", + "id": "3d948893", + "metadata": {}, + "source": [ + "Check the differences" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62cdee76", + "metadata": {}, + "outputs": [], + "source": [ + "!git diff test-1" + ] + }, + { + "cell_type": "markdown", + "id": "0b3bf0a4", + "metadata": {}, + "source": [ + "And finally merge" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "583a589e", "metadata": {}, "outputs": [], "source": [ - "!git checkout master\n", "!git merge test-1" ] }, { "cell_type": "markdown", + "id": "bc785752", "metadata": {}, "source": [ - "- push changes to master" + "Exercise:\n", + "\n", + "- check the master history;\n", + "- check the difference with the last commit.\n", + "\n", + "\n", + "Finally, push changes to `origin/master`" ] }, { "cell_type": "code", "execution_count": null, + "id": "81dfa372", "metadata": {}, "outputs": [], "source": [ @@ -992,6 +1417,7 @@ }, { "cell_type": "markdown", + "id": "a65f7a63", "metadata": {}, "source": [ "To make it work, you need to be authenticated/authorized with the remote repo ;)" @@ -1000,5 +1426,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git 
a/git-101/notebooks/01-git.slides.html b/git-101/notebooks/01-git.slides.html new file mode 100644 index 0000000..1c72edd --- /dev/null +++ b/git-101/notebooks/01-git.slides.html @@ -0,0 +1,15318 @@ + + + + + + + + + +01-git slides + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + diff --git a/git-101/tox.ini b/git-101/tox.ini index 268e6e7..cee54b2 100644 --- a/git-101/tox.ini +++ b/git-101/tox.ini @@ -5,7 +5,7 @@ skipsdist = True [testenv] deps = git+https://github.com/ioggstream/notedown -whitelist_externals = +allowlist_externals = make commands = make