diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000000..513f9979366
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+*
+!playbooks/
+!docker/build/*
+!docker/devstack_common_ansible_overrides.yml
+!docker/build/*/*.yml
+docker/build/*/Dockerfile
+!docker/plays/
+!util/install/
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000000..6dc762d1d4b
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,27 @@
+# This file is for unifying the coding style for different editors and IDEs.
+# More information at http://EditorConfig.org
+
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+max_line_length = 120
+trim_trailing_whitespace = true
+
+[*.{yml,yaml}]
+indent_size = 2
+
+[*.rst]
+max_line_length = 79
+
+[*.mk]
+indent_style = tab
+indent_size = 8
+
+[Makefile]
+indent_style = tab
+indent_size = 8
diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE
new file mode 100644
index 00000000000..93251af4205
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE
@@ -0,0 +1,91 @@
+## This configuration file overrides the inherited configuration file defined
+## in openedx/.github/.github/ISSUE_TEMPLATE because this repo currently does
+## not have Issues turned on, so we create this override to *only* show DEPR
+## issues to users creating Issues. Once Issues are turned on and the repo is
+## ready to accept Issues of all types, this file must be deleted so inheritance
+## of standard openedx configuration works properly.
+
+name: Deprecation (DEPR) Ticket
+description: Per OEP-21, use this template to begin the technology deprecation process.
+title: "[DEPR]: "
+labels: ["DEPR"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Refer to [OEP-21](https://open-edx-proposals.readthedocs.io/en/latest/processes/oep-0021-proc-deprecation.html) for more detail on the deprecation and removal process. This ticket should only be used for proposing the removal of an Open edX technology.
+        Please leave [DEPR] in the title of your ticket!
+  - type: input
+    id: todays-date
+    attributes:
+      label: Proposal Date
+      description: The start date of this proposal (likely today)
+      placeholder: 29 February 2020
+    validations:
+      required: true
+  - type: input
+    id: accept-date
+    attributes:
+      label: Ticket Acceptance Date
+      description: When is the target date for getting this proposal accepted?
+      placeholder: 29 February 2020
+    validations:
+      required: true
+  - type: input
+    id: remove-date
+    attributes:
+      label: Technology Removal Date
+      description: When is the target date for getting this technology removed?
+      placeholder: 29 February 2020
+    validations:
+      required: true
+  - type: input
+    id: named-release-without
+    attributes:
+      label: First Open edX Named Release Without This Functionality
+      description: Named releases are generally cut in early April and early October. Based on the above removal date, what named release would be the first without this code? Please reach out to the Build Test Release working group (#wg-build-test-release in Slack) if you're not sure.
+      placeholder: Dogwood
+    validations:
+      required: true
+  - type: textarea
+    id: rationale
+    attributes:
+      label: Rationale
+      description: Explain, in a few sentences, why this technology should be removed - what's the usage pattern? What's wrong with keeping it around?
+    validations:
+      required: true
+  - type: textarea
+    id: removal
+    attributes:
+      label: Removal
+      description: Include a description with links to what exactly is being removed.
+    validations:
+      required: true
+  - type: textarea
+    id: replacement
+    attributes:
+      label: Replacement
+      description: Include a description with links to what this is being replaced by.
+    validations:
+      required: true
+  - type: textarea
+    id: deprecation
+    attributes:
+      label: Deprecation
+      description: If you plan to mark the code for deprecation, explain how.
+    validations:
+      required: false
+  - type: textarea
+    id: migration
+    attributes:
+      label: Migration
+      description: If automated migration will be needed, explain your migration plan.
+    validations:
+      required: false
+  - type: textarea
+    id: addl-info
+    attributes:
+      label: Additional Info
+      description: If there is any additional publicly sharable information or data from your earlier analysis, include that.
+    validations:
+      required: false
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000000..50e147ffc92
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,18 @@
+GitHub issues are not meant for support questions. Please use the
+[mailing list](https://groups.google.com/forum/#!forum/openedx-ops)
+or [Slack channels](https://open.edx.org/blog/open-edx-slack) to get help.
+
+Please open issues here to report bugs in the ansible scripts themselves.
+
+When reporting an issue, please include the following information.
+
+If `/edx/bin/show-repo-heads` is available on your system, it can provide much of this information:
+
+- Configuration ref:
+- edx-platform ref:
+- other refs:
+
+# Steps to replicate:
+- one
+- two
+- three
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000000..e55706fa566
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,9 @@
+Configuration Pull Request
+---
+
+Make sure that the following steps are done before merging:
+
+ - [ ] Have a Site Reliability Engineer review the PR if you don't own all of the services impacted.
+ - [ ] If you are adding any new default values that need to be overridden when this change goes live, update internal repos and add an entry to the top of the CHANGELOG.
+ - [ ] Performed the appropriate testing.
+ - [ ] Think about how this change will affect Open edX operators and update the wiki page for the next Open edX release if needed.
diff --git a/.github/workflows/add-depr-ticket-to-depr-board.yml b/.github/workflows/add-depr-ticket-to-depr-board.yml
new file mode 100644
index 00000000000..250e394abc1
--- /dev/null
+++ b/.github/workflows/add-depr-ticket-to-depr-board.yml
@@ -0,0 +1,19 @@
+# Run the workflow that adds new tickets that are either:
+# - labelled "DEPR"
+# - title starts with "[DEPR]"
+# - body starts with "Proposal Date" (this is the first template field)
+# to the org-wide DEPR project board
+
+name: Add newly created DEPR issues to the DEPR project board
+
+on:
+  issues:
+    types: [opened]
+
+jobs:
+  routeissue:
+    uses: openedx/.github/.github/workflows/add-depr-ticket-to-depr-board.yml@master
+    secrets:
+      GITHUB_APP_ID: ${{ secrets.GRAPHQL_AUTH_APP_ID }}
+      GITHUB_APP_PRIVATE_KEY: ${{ secrets.GRAPHQL_AUTH_APP_PEM }}
+      SLACK_BOT_TOKEN: ${{ secrets.SLACK_ISSUE_BOT_TOKEN }}
diff --git a/.github/workflows/add-remove-label-on-comment.yml b/.github/workflows/add-remove-label-on-comment.yml
new file mode 100644
index 00000000000..0f369db7d29
--- /dev/null
+++ b/.github/workflows/add-remove-label-on-comment.yml
@@ -0,0 +1,20 @@
+# This workflow runs when a comment is made on the ticket.
+# If the comment starts with "label: ", it tries to apply
+# the label indicated in the rest of the comment.
+# If the comment starts with "remove label: ", it tries
+# to remove the indicated label.
+# Note: labels are allowed to have spaces, and this script does
+# not parse spaces (as often a space is legitimate), so the command
+# "label: really long lots of words label" will apply the
+# label "really long lots of words label".
+
+name: Allows for the adding and removing of labels via comment
+
+on:
+  issue_comment:
+    types: [created]
+
+jobs:
+  add_remove_labels:
+    uses: openedx/.github/.github/workflows/add-remove-label-on-comment.yml@master
+
diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml
new file mode 100644
index 00000000000..fec11d6c259
--- /dev/null
+++ b/.github/workflows/commitlint.yml
@@ -0,0 +1,10 @@
+# Run commitlint on the commit messages in a pull request.
+
+name: Lint Commit Messages
+
+on:
+  - pull_request
+
+jobs:
+  commitlint:
+    uses: openedx/.github/.github/workflows/commitlint.yml@master
diff --git a/.github/workflows/mysql-5.7-build.yml b/.github/workflows/mysql-5.7-build.yml
new file mode 100644
index 00000000000..6aeca0cf202
--- /dev/null
+++ b/.github/workflows/mysql-5.7-build.yml
@@ -0,0 +1,41 @@
+name: MySQL 5.7 image build
+
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "docker/build/mysql/Dockerfile"
+      - .github/workflows/mysql-5.7-build.yml
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        build_platform: ["linux/amd64"]
+    runs-on: ubuntu-latest
+    name: Build amd64 mysql image
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Setup QEMU
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: ${{ matrix.build_platform }}
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+      - name: Build and push amd64 image
+        uses: docker/build-push-action@v3
+        with:
+          push: true
+          context: docker/build/mysql/
+          tags: edxops/edx-mysql:5.7
+          platforms: ${{ matrix.build_platform }}
diff --git a/.github/workflows/playbook-test.yml b/.github/workflows/playbook-test.yml
new file mode 100644
index 00000000000..8bb8315feb6
--- /dev/null
+++ b/.github/workflows/playbook-test.yml
@@ -0,0 +1,30 @@
+name: Playbook Test
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 4
+      matrix:
+        python-version: [3.8]
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v1
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install Dependencies
+        run: |
+          pip install demjson3
+          pip install -r requirements.txt
+      - name: Run Make test.playbooks
+        run: |
+          timeout 90m make --keep-going test.playbooks
diff --git a/.github/workflows/self-assign-issue.yml b/.github/workflows/self-assign-issue.yml
new file mode 100644
index 00000000000..37522fd57b1
--- /dev/null
+++ b/.github/workflows/self-assign-issue.yml
@@ -0,0 +1,12 @@
+# This workflow runs when a comment is made on the ticket.
+# If the comment starts with "assign me", it assigns the author to the
+# ticket (case insensitive).
+
+name: Assign comment author to ticket if they say "assign me"
+on:
+  issue_comment:
+    types: [created]
+
+jobs:
+  self_assign_by_comment:
+    uses: openedx/.github/.github/workflows/self-assign-issue.yml@master
diff --git a/.github/workflows/syntax-test.yml b/.github/workflows/syntax-test.yml
new file mode 100644
index 00000000000..03693045e0a
--- /dev/null
+++ b/.github/workflows/syntax-test.yml
@@ -0,0 +1,30 @@
+name: Syntax Test
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 4
+      matrix:
+        python-version: [3.8]
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v1
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install Dependencies
+        run: |
+          pip install demjson3
+          pip install -r requirements.txt
+      - name: Run Make test.syntax
+        run: |
+          timeout 90m make --keep-going test.syntax
diff --git a/.github/workflows/upgrade-python-requirements.yml b/.github/workflows/upgrade-python-requirements.yml
new file mode 100644
index 00000000000..e951fc09388
--- /dev/null
+++ b/.github/workflows/upgrade-python-requirements.yml
@@ -0,0 +1,24 @@
+name: Upgrade Requirements
+
+on:
+  schedule:
+    - cron: "15 1 * * 1"
+  workflow_dispatch:
+    inputs:
+      branch:
+        description: 'Target branch to create requirements PR against'
+        required: true
+        default: 'master'
+
+jobs:
+  call-upgrade-python-requirements-workflow:
+    uses: openedx/.github/.github/workflows/upgrade-python-requirements.yml@master
+    with:
+      branch: ${{ github.event.inputs.branch }}
+      team_reviewers: "2u-sre"
+      email_address: tools-jenkins-isre@2u-internal.opsgenie.net
+      send_success_notification: false
+    secrets:
+      requirements_bot_github_token: ${{ secrets.REQUIREMENTS_BOT_GITHUB_TOKEN }}
+      requirements_bot_github_email: ${{ secrets.REQUIREMENTS_BOT_GITHUB_EMAIL }}
+      edx_smtp_username: ${{ secrets.EDX_SMTP_USERNAME }}
+      edx_smtp_password: ${{ secrets.EDX_SMTP_PASSWORD }}
diff --git a/.gitignore b/.gitignore
index 6d810170887..31d23156826 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,30 @@
 *~
 .#*
-vagrant/*/devstack/edx-platform
-vagrant/*/devstack/cs_comments_service
-vagrant/*/devstack/ora
+vagrant/*/*/edx-platform
+vagrant/*/*/cs_comments_service
+vagrant/*/*/ora
+vagrant/*/*/analytics_api
+vagrant/*/*/insights
+vagrant/*/*/ecommerce
+vagrant/*/*/programs
+vagrant_ansible_inventory_default
+
+### OS X artifacts
+*.DS_Store
+.AppleDouble
+:2e_*
+:2e#
+
+## Make artifacts
+.build
+playbooks/travis-test.yml
+
+## Local virtualenv
+/venv
+
+## Ansible Artifacts
+*.retry
+
+### VisualStudioCode ###
+.vscode/*
diff --git a/AUTHORS b/AUTHORS
old mode 100644
new mode 100755
index e0644005db5..03e5648d2a0
--- a/AUTHORS
+++ b/AUTHORS
@@ -17,3 +17,46 @@
 Kevin Luo
 Carson Gee
 Xavier Antoviaque
 James Tauber
+Bertrand Marron
+Han Su Kim
+Ned Batchelder
+Dave St.Germain
+Gabe Mulley
+Greg Price
+William Desloge
+Valera Rozuvan
+Ker Ruben Ramos
+Fred Smith
+Wang Peifeng
+Ray Hooker
+David Pollack
+Rodolphe Quiedeville
+Matjaz Gregoric
+Ben Patterson
+Jason Zhu
+Rohit Karajgi
+Brandon DeRosier
+Arbab Nazar
+Nilesh Londhe
+Riccardo Murri
+Xaver Y.R. Chen
+Cristian Salamea
+Jonathan Peter
+Kevin Falcone
+Max Rothman
+Andy Armstrong
+Xiang Junfu
+Sarina Canelake
+Steven Burch
+Dan Powell
+Omar Al-Ithawi
+David Adams
+Florian Haas
+Shohei Maeda
+Bill DeRusha
+Jillian Vogel
+Zubair Afzal
+Kyle McCormick
+Muzaffar Yousaf
+Rabia Iftikhar
+Mostafa Hussein
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000000..9f524eb0fae
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,1536 @@
+
+# Changelog
+
+All notable changes to this project will be documented in this file.
+Add any new changes to the top (right below this line).
+
+- 2024-03-20
+  - Add `COMMON_ENABLE_DATADOG_APP` for APM instrumentation, supported in LMS and CMS so far. Disabled by default.
+
+- 2024-01-25
+  - Role: mfe
+    - Added `MFE_ENVIRONMENT_DEFAULT_EXTRA` to allow operators to add extra environment variables to all MFEs when
+      deploying them with the `mfe_deployer` role.
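To make the `MFE_ENVIRONMENT_DEFAULT_EXTRA` entry above concrete, here is a minimal sketch of an operator override, assuming the variable is a flat mapping of environment variable names to values (the variable name comes from the entry; the key and value shown are purely illustrative):

```yaml
# Hypothetical override for the mfe_deployer role; the inner key/value are illustrative only.
MFE_ENVIRONMENT_DEFAULT_EXTRA:
  EXAMPLE_SUPPORT_URL: 'https://support.example.com'
```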
+
+- 2023-10-09
+  - Role: edxapp
+    - Set a default value for `EDXAPP_EDXAPP_SECRET_KEY` so that, if it is not overridden in the secure config,
+      the helper script template can still be generated.
+
+- 2023-09-28
+  - Role: prospectus
+    - Remove `GATSBY_TURN_ON_SIDEBAR_FILTERS` flag
+
+- 2023-08-29
+  - Role: prospectus
+    - Added `GATSBY_TURN_ON_SIDEBAR_FILTERS` flag to enable new sidebar changes
+
+- 2023-06-20
+  - Role: prospectus
+    - Added `GATSBY_XPERT_STG_API_URL`, `GATSBY_XPERT_UAT_API_URL` and `GATSBY_XPERT_PROD_API_URL`
+      to enable Prospectus to communicate with the Xpert API
+
+- 2023-04-07
+  - Changed default value of `EDXAPP_MONGO_REPLICA_SET` to `null` from the existing
+    empty string `""`, to make it compatible with pymongo >= 3.11 in Nutmeg and above.
+
+- 2023-03-27
+  - Changed default value of `MFE_ORDER_HISTORY_URL` to the empty string `""` to
+    prevent the MFE header dropdown from rendering an `Order History` option when
+    the Order History URL is not configured.
+
+- 2023-03-22
+  - Role: prospectus
+    - Added `GATSBY_ZENDESK_KEY` to enable the Zendesk chat web widget (classic) on the edx.org Marketplace.
+
+- 2023-02-27
+  - Role: edx_django_service
+    - Enable Celery workers in supervisor when
+      `edx_django_service_enable_celery_workers` is true.
+
+- 2023-02-23
+  - Role: payment
+    - Add `PAYMENT_STRIPE_PUBLISHABLE_KEY` & `PAYMENT_STRIPE_RESPONSE_URL`
+      settings for operators running the Payment MFE with Stripe.
+
+- 2023-01-18
+  - Updated sandbox deployment scripts to set up edx-platform in docker containers.
+  - Added Fluentd for checking tracking logs when running edx-platform in docker containers.
+
+- 2022-07-25
+  - Add new routing key for the individual learner course regrade queue
+
+- 2022-06-08
+  - Role: edxapp
+    - Added a new `CELERY_RESULT_BACKEND` setting to allow operators to
+      override the default celery result backend.
+
+- 2022-06-06
+  - Role: common
+    - Removed the PPA for `watchman`, as we have shifted to Ubuntu 20.04;
+      if you are running this against an Ubuntu version older than 20.04,
+      installation will fail.
+
+- 2022-06-01
+  - Upgrade ansible to 2.9
+
+- 2022-04-06
+  - Role: edxapp
+    - Added a new `EDXAPP_COMPILE_JSI18N` variable to control whether
+      to run the `compilejsi18n` management command on edxapp deploy.
+      Defaults to false.
+
+- 2022-04-06
+  - Role: simple_theme
+    - Added a new `SIMPLETHEME_I18N_DJANGO` setting to allow operators to provide
+      additional translations, or override existing django translations.
+
+- 2022-03-25
+  - Role: edxapp
+    - Added a new `CUSTOM_RESOURCE_TEMPLATES_DIRECTORY` setting to allow operators to
+      override the default resource templates.
+
+- 2022-02-01
+  - Role: edxapp
+    - Added a new `EDXAPP_PREPEND_LOCALE_PATHS` setting to allow operators to
+      override the default translations.
+
+- 2022-01-06
+  - Role: edx_notes_api
+    - Replaced `ELASTICSEARCH_URL` with `ELASTICSEARCH_DSL` in `edx_notes_api_service_config`.
+
+- 2022-01-06
+  - Role: discovery
+    - Replaced `ELASTICSEARCH_URL` with `ELASTICSEARCH_CLUSTER_URL` in `discovery_service_config_overrides`.
+
+- 2022-01-05
+  - Removed an extraneous `-A ecommerce_worker` from the ecomworker startup script,
+    which was preventing the celery worker process from starting.
+
+- 2021-11-30
+  - Upgraded celery to 5.2.0 and adjusted CLI call parameters accordingly
+  - Bumped single-beat to use a more supported fork of the project
+
+- 2021-11-01
+  - Docker: edxapp
+    - Removed unnecessary `CELERY_QUEUES` overrides for LMS and Studio.
+      Instead, just use the default value of `CELERY_QUEUES` as set in
+      edx-platform's settings files.
+      Functionally, this means that in addition to the existing queues
+      that LMS and Studio defined, there is now a "low priority" queue
+      for Studio, suitable for tasks like a CourseGraph dump.
+
+- 2021-10-20
+  - Role: neo4j
+    - Upgrade Neo4j from 3.2.2 to 3.5.28.
+
+- 2021-08-26
+  - Role: neo4j
+    - Bring the Neo4j role closer in line with what we really deploy:
+      - Change Neo4j version from 3.2.2 to 3.3.1.
+      - Expose Bolt on 0.0.0.0:7687 with optional encryption.
+      - Enable `dbms.allow_upgrade`, which is the new name of the `dbms.allow_format_migration` key.
+      - Remove http->https redirection logic when NGINX_ENABLE_SSL is false.
+
+- 2021-09-28
+  - Role: nginx
+    - Add `NGINX_ENABLE_IPV6` configuration variable to make nginx
+      services listen on the IPv6 wildcard address (in addition to
+      the IPv4 one, where services always listen). Defaults to true.
+
+- 2021-09-19
+  - Remove configuration for edx-certificates, as that repo and service are no longer used.
+
+- 2021-07-29
+  - Role: edxapp
+    - Add `EDXAPP_ENABLE_MONGODB_INDEXES` configuration variable to optionally set up indexes on the edxapp mongodb.
+  - Role: forum
+    - Add `FORUM_ENABLE_MONGODB_INDEXES` configuration variable to optionally set up indexes on the forum mongodb.
+
+- 2021-07-19
+  - Role: edx_django_service
+    - Allows writing extra requirements to an 'extra.txt' requirements file in the service's requirements directory.
+  - Role: ecommerce
+    - Adds an optional flag to write the extra requirements to an 'extra.txt' file, since many of the app's setup commands
+      use tox, and that creates its own environments separate from the default ecommerce virtualenv environment where the
+      `ECOMMERCE_EXTRA_REQUIREMENTS` requirements are installed.
+
+- 2021-06-17
+  - Role: credentials
+    - Installs extra python packages specified in `CREDENTIALS_EXTRA_REQUIREMENTS` (defaults to `[]`).
+
+- 2021-06-07
+  - In `openedx_native.yml`
+    - Added configuration variable ECOMMERCE_CSRF_TRUSTED_ORIGINS to allow the payment MFE to interact with the ecommerce service
+    - Added configuration variable ECOMMERCE_CORS_ORIGIN_WHITELIST to allow cross-domain interaction between MFEs and the ecommerce service
+    - Added new conditional variable MFE_DEPLOY_ECOMMERCE_MFES to not build ecommerce-related MFEs without the ecommerce service
+    - Created a SiteConfiguration for the default Site to enable the ecommerce MFE
+    - Added configuration variable EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL
+    - Set ECOMMERCE_CORS_ALLOW_CREDENTIALS to true
+    - Added new configuration variable ECOMMERCE_ENABLE_PAYMENT_MFE
+  - Role: ecommerce
+    - Added new configuration variable ECOMMERCE_ENABLE_PAYMENT_MFE with a default value of false
+    - Updated the `create_or_update_site` management command to set the `enable-microfrontend-for-basket-page` and `payment-microfrontend-url` flags
+  - Role: mfe_deployer
+    - Added the MFES_ECOMMERCE list for ecommerce-related MFEs
+    - Added new configuration variable MFE_DEPLOY_ECOMMERCE_MFES
+    - Added new deploy_mfes variable to collect the list of all MFEs to deploy
+    - Changed looping from the `MFES` list to the `deploy_mfes` list internally
+  - Role: mfe_flags_setup
+    - Added new flag `order_history.redirect_to_microfrontend`
+
+- 2021-06-05
+  - Remove the ENABLE_INSTRUCTOR_ANALYTICS setting, which was removed from edx-platform in 2015
+
+- 2021-05-24
+  - In the ``manage_edxapp_users_and_groups.yml`` playbook, allow LMS and CMS
+    groups to be managed separately via ``manage-groups-lms`` and
+    ``manage-groups-cms`` tags.
+    These replace the ``manage-groups`` tag, which will be interpreted
+    as ``manage-groups-lms`` until it is removed.
+
+- 2021-05-18
+  - The version of tubular is controlled by RETIREMENT_SERVICE_VERSION.
+    Previously it was always "master", which broke older Open edX re-installations.
+
+- 2021-05-13
+  - Role: edx_django_service
+    - Added a task that installs extra python packages specified in `edx_django_service_extra_requirements`.
+  - Role: discovery
+    - Installs extra python packages specified in `DISCOVERY_EXTRA_REQUIREMENTS`.
+  - Role: ecommerce
+    - Installs extra python packages specified in `ECOMMERCE_EXTRA_REQUIREMENTS`.
+
+- 2021-03-08
+  - Removed the instruction from ansible-bootstrap.sh that told people to activate
+    the virtualenv. This was incorrect for community installations.
+
+- 2021-03-07
+  - Role: ecommerce
+    - Added new configuration variable ECOMMERCE_EXTRA_CONFIG_OVERRIDES, which allows overriding any ecommerce settings.
+
+- 2021-01-20
+  - Removed the xserver role and all its references.
+    - This service has been removed per DEPR-95
+
+- 2021-01-19
+  - Role: edxapp, edx_notes_api, gitreload, xqueue, xserver
+    - Increase gunicorn limit_request_field_size to 16384 in order to accommodate large cookies.
+
+- 2021-01-15
+  - Role: nginx
+    - Increase large_client_header_buffers from 4 to 8 buffers to handle browsers with too much cookie data
+
+- 2021-01-12
+  - Playbook: go-server
+    - Removed
+  - Playbook: go-agent
+    - Removed
+  - Role: go-server
+    - Removed
+  - Role: go-agent
+    - Removed
+
+- 2021-01-12
+  - Role: nginx
+    - Increase large_client_header_buffers to 16K to handle browsers with too much cookie data
+
+- 2021-01-08
+  - Role: tinymce_plugins
+    - Installs `tinymce_plugins` specified in the `TINYMCE_ADDITIONAL_PLUGINS_LIST` configuration variable
+    - Rebuilds TinyMCE files with the newly installed plugins and the previous ones
+  - Role: edxapp
+    - Includes the `tinymce_plugins` role in order to install custom TinyMCE plugins, if there are any.
+
+- 2021-01-05
+  - Role: edxapp
+    - Setting `proxy_buffer_size` behind the EDXAPP_SET_PROXY_BUFFER_SIZE flag.
+
+- 2020-12-11
+  - Role: jenkins_master
+    - Added variables/tasks to create directories for job virtual
+      environments, as part of removing shiningpanda as a dependency.
+
+- 2020-12-09
+  - Role: edxapp
+    - Updated renderer options to reference `common.djangoapps.edxmako`
+      instead of `edxmako`. The latter import path is deprecated.
+      Other than removing warnings, there should be no functional
+      change.
+
+- 2020-12-02
+  - Role: mfe
+    - Added logo-related configuration settings, with defaults.
+
+- 2020-12-01
+  - Role: edxapp
+    - Default the CodeJail Python version to the same as the rest of edxapp.
+  - Role: edxapp
+    - Added the `EDXAPP_ORGANIZATIONS_AUTOCREATE` variable with a default of
+      `true`. See the `ORGANIZATIONS_AUTOCREATE` toggle documentation in
+      edx-platform/cms/envs/common.py for details.
+
+- 2020-11-20
+  - Role: edxapp
+    - Updated the worker newrelic config to have the service variant in the app name. This will separate the names
+      of the newrelic apps into `...-lms` and `...-cms` to make it easier to monitor them separately. This will
+      impact any newrelic monitoring and alerting you have that is linked to the old app name, which should be
+      updated to use both of the new application names.
+
+- 2020-11-17
+  - Removed mentions of ANSIBLE_REPO and ANSIBLE_VERSION since we no longer use our own fork of Ansible.
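As a sketch of the 2021-03-07 entry above: `ECOMMERCE_EXTRA_CONFIG_OVERRIDES` is described as allowing any ecommerce setting to be overridden, which suggests a mapping merged over the rendered service config. The keys and values below are illustrative Django-style settings, not values taken from this repo:

```yaml
# Hypothetical overrides merged into the generated ecommerce configuration.
ECOMMERCE_EXTRA_CONFIG_OVERRIDES:
  PLATFORM_NAME: 'Example University'  # illustrative
  SESSION_COOKIE_AGE: 1209600          # illustrative; two weeks in seconds
```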
+
+- 2020-11-10
+  - Role: mfe
+    - Added a role to deploy MFEs on a single machine with nginx.
+  - Open edX
+    - Use the new role to deploy the gradebook, profile and account MFEs in the native installation.
+
+- 2020-11-04
+  - Role: edxapp
+    - Stopped rendering the legacy auth and env json files that edxapp is no longer reading. Rendering can be re-enabled by setting EDXAPP_ENABLE_LEGACY_JSON_CONFIGS to true
+
+- 2020-10-27
+  - Role: notifier
+    - Removed the notifier role (see DEPR-106 for details)
+
+- 2020-10-13
+  - Role: forums
+    - Add settings for the ES7 upgrade.
+
+- 2020-09-23
+  - Role: certs
+    - Changed the Python version used for creating the virtualenv from the system's default (2.7) to 3.8.
+
+- 2020-09-18
+  - Role: nginx
+    - Add a location to support accessing files from `EDXAPP_MEDIA_URL` under the cms site.
+
+- 2020-09-14
+  - Playbook: program_manager
+    - Removed. It is replaced by program_console
+  - Role: program_manager
+    - Removed. It is replaced by program_console
+
+- 2020-09-10
+  - Playbook: program_console
+    - Added playbook to set up the program-console micro-frontend application on sandboxes
+    - This is created to replace the program_manager application. The app was renamed.
+  - Role: program_console
+    - Created the program-console role for the micro-frontend application to be set up
+    - This is created to replace the program_manager role. The app was renamed.
+
+- 2020-09-03
+  - Role: edxapp
+    - Added `EDXAPP_FEATURES_DEFAULT` and `EDXAPP_FEATURES_EXTRA`, which are combined into `EDXAPP_FEATURES`, allowing for future options to be added as needed during provisioning.
+    - Added `EDXAPP_AUTH_USE_OPENID_PROVIDER` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_COMBINED_LOGIN_REGISTRATION` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_CORS_HEADERS` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_COUNTRY_ACCESS` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_CROSS_DOMAIN_CSRF_COOKIE` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_DISCUSSION_HOME_PANEL` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_DISCUSSION_SERVICE` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_ENROLLMENT_RESET` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_GRADE_DOWNLOADS` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_INSTRUCTOR_ANALYTICS` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_LTI_PROVIDER` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_SPECIAL_EXAMS` to allow creating/updating the configuration values during provisioning.
+    - Added `EDXAPP_ENABLE_VIDEO_UPLOAD_PIPELINE` to allow creating/updating the configuration values during provisioning.
+
+- 2020-08-26
+  - Role: whitelabel
+    - Removes the whitelabel role and all associated configuration for whitelabel sites.
+
+- 2020-08-17
+  - Role: nginx
+    - Added the `NGINX_ALLOW_PRIVATE_IP_ACCESS` boolean, which allows disabling the IP-disclosure handling within private subnetworks.
+      This is needed by the ELB to run health checks while using an encrypted connection between the ELB and the AppServer (`NGINX_ENABLE_SSL`).
+      Without this enabled, the ELB will get a `403` response when trying to reach the AppServer via its IP address (it is still impossible to specify the `Host` header for the health check).
+
+- 2020-08-01
+  - Role: edxapp
+    - Added `EDXAPP_SITE_CONFIGURATION` to allow creating/updating the `SiteConfiguration` values during provisioning.
+
+- 2020-07-27
+  - Role: all
+    - Converted lowercase ansible variables to uppercase.
+
+- 2020-07-24
+  - Role: newrelic_mongo_monitor
+    - Added the new newrelic_mongo_monitor role and playbook for configuring the newrelic infrastructure agent mongodb
+      integration.
+
+- 2020-06-30
+  - Role: edxapp
+    - Added COURSE_CATALOG_URL_ROOT, which contains the root url of the course catalog service (discovery service).
+
+- 2020-06-02
+  - Role: edxapp
+    - Add a new `edxapp_sandbox_python_version` variable that determines the python version of the edxapp sandbox
+      used for instructor python code. This will default to `python3.5` but can be reverted to `python2.7` if necessary.
+
+- 2020-05-06
+  - Role: all
+    - Split the COMMON_SANDBOX_BUILD variable into its two components: SANDBOX_CONFIG and CONFIGURE_JWTS.
+  - Disable install of private requirements for docker devstack.
+
+- 2020-05-05
+  - Role: edxapp
+    - Enable paver autocomplete in docker devstack
+
+- 2020-04-24
+  - Must be set if user credentials are in the connection string, or use `""` if no user credentials are required.
+
+- 2020-04-21
+  - Role: forum
+    - Added `FORUM_MONGO_AUTH_MECH` to allow the authentication mechanism to be configurable.
+      Defaults to `":scram"`, which is supported by Mongo>=3.0, because `":mongodb_cr"` is removed in Mongo>=4.0.
+      Use `":mongodb_cr"` for mongo 2.6.
+
+- 2020-04-14
+  - Docker: edxapp
+
+  - Roles: edx_django_service, registrar, enterprise_catalog
+    - Moved celery worker supervisor config files/scripts into edx_django_service
+    - Removed the following variables
+      - ENTERPRISE_CATALOG_WORKER_DEFAULT_STOPWAITSECS
+      - ENTERPRISE_CATALOG_CELERY_HEARTBEAT_ENABLED
+      - REGISTRAR_WORKER_DEFAULT_STOPWAITSECS
+      - REGISTRAR_CELERY_HEARTBEAT_ENABLED
+      - ENTERPRISE_CATALOG_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING
+      - ENTERPRISE_CATALOG_NEWRELIC_WORKERS_APPNAME
+      - REGISTRAR_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING
+      - REGISTRAR_NEWRELIC_WORKERS_APPNAME
+
+- 2020-03-31
+  - Role: edxapp
+    - Added the Stanford-developed Image Modal XBlock.
+
+- 2020-03-23
+  - Role: edxapp
+    - Added the Stanford-developed SQL Grader XBlock.
+
+- 2020-03-04
+  - Role: mount_ebs
+    - Added a check for disk size; size is now a required parameter in the variables volumes and MONGO_VOLUMES
+    - This is to prevent mounting the wrong volumes when AWS swaps the order
+  - Role: all
+    - Removed OPENID settings
+  - Role: all
+    - Removed all settings with OIDC in the name
+
+- 2020-02-26
+  - Role: edxapp
+    - Added the `ENTERPRISE_LEARNER_PORTAL_HOSTNAME` env var for the lms.
+
+- 2020-02-25
+  - Role: all
+    - Removed the unused task timing callback plugin.
+
+- 2020-02-24
+  - Role: ecommerce
+    - Added the `ENTERPRISE_LEARNER_PORTAL_HOSTNAME` env var for ecommerce.
+
+- 2020-01-31
+  - Role: edxapp
+    - Added the Stanford-developed Free Text Response XBlock.
+  - Role: edxapp
+    - Added the Stanford-developed Submit-and-Compare XBlock.
+
+- 2020-01-29
+  - Role: edxapp
+    - Added the Stanford-developed Qualtrics and In-Video Quiz XBlocks.
+  - Open edX
+    - Don't use AWS_GATHER_FACTS; it was only for tagging, which we don't need.
+
+- 2020-01-24
+  - Open edX
+    - The wrong version of xqueue was being installed; fixed.
+
+- 2020-01-21
+  - Role: enterprise_catalog
+    - Added infrastructure to start up and deploy celery workers
+
+- 2020-01-07
+  - Role: insights
+    - Install libssl-dev, needed for mysqlclient
+
+- 2020-01-03
+  - Role: insights
+    - Add DOT config (deprecate DOP)
+
+- 2019-12-26
+  - Role: edxapp
+    - Added the Celery worker `prefetch_optimization` option to allow switching from 'default' to 'fair' (only write to available worker processes)
+
+- 2019-12-20
+  - Open edX
+    - native.sh needed to uninstall pyyaml to proceed
+
+- 2019-12-09
+  - Role: enterprise_catalog
+    - Create role
+
+- 2019-12-04
+  - Role: blockstore
+    - Increased the upload limit to 10M
+
+- 2019-11-12
+  - Role: ecommerce
+    - Fixed the paypal payment processor default configuration
+
+- 2019-08-30
+  - Role: edxapp
+    - Added `ENABLE_PUBLISHER` for indicating that the publisher frontend service is in use
+  - Role: discovery
+    - Added `ENABLE_PUBLISHER` for indicating that the publisher frontend service is in use
+
+- 2019-08-02
+  - Role: edxapp
+    - Added the `ENABLE_ENROLLMENT_RESET` feature flag for masters integration sandboxes
+
+- 2019-08-01
+  - Role: conductor
+    - New role added to configure the conductor service
+
+- 2019-07-22
+  - Role: jwt_signature
+    - Added role to inject JWT signing keys into application config; used from edxapp, worker, and registrar.
+
+- 2019-07-15
+  - Playbook: masters_sandbox_update
+    - Create edx partner
+
+- 2019-07-12
+  - Role: registrar
+    - Set CSRF_TRUSTED_ORIGINS.
+    - Set CORS_ORIGIN_WHITELIST.
+
+- 2019-07-11
+  - Role: discovery
+    - Override DISCOVERY_MYSQL_REPLICA_HOST to `edx.devstack.mysql` in docker.
+
+- 2019-07-10
+  - Playbook: masters_sandbox
+    - Include call to create_api_access_request
+
+- 2019-07-09
+  - Role: discovery
+    - Add mysql replica settings to env config.
+
+- 2019-07-05
+  - Playbook: program_manager
+    - Added playbook to set up the program-manager micro-frontend application on sandboxes
+  - Role: program_manager
+    - Created the program-manager role for the micro-frontend application to be set up
+
+- 2019-06-24
+  - Role: common_vars
+    - Default `COMMON_JWT_PUBLIC_SIGNING_JWK_SET` to `''`
+      instead of `!!null`. Because of how this setting is handled,
+      `!!null` ends up rendering as the literal string `None` instead
+      of the value `null`, which causes JSON decoding to fail
+      wherever the default value is used (as `'None'` is not valid JSON).
+      By setting the default to a Falsy value like the
+      empty string, edx-drf-extensions does not attempt to JSON-
+      decode it.
+
+- 2019-06-20
+  - Playbook: masters_sandbox
+    - Added playbook to set up user and api access
+
+- 2019-06-19
+  - Role: registrar
+    - Changed the `REGISTRAR_CELERY_ALWAYS_EAGER` default to `false`.
+  - Role: registrar
+    - Added `REGISTRAR_CELERY_ALWAYS_EAGER` with default `True`.
+    - Injected the above setting as an environment variable for Registrar.
+  - Role: supervisor
+    - Add registrar to `pre_supervisor_checks.py`
+  - Role: registrar
+    - Added `registrar-workers.conf.j2`
+    - Added a task to generate `registrar-workers.conf` from `registrar-workers.conf.j2`
+    - Added `REGISTRAR_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING`
+    - Added `REGISTRAR_WORKER_DEFAULT_STOPWAITSECS`
+    - Added `REGISTRAR_CELERY_HEARTBEAT_ENABLED`
+    - Added `REGISTRAR_NEWRELIC_WORKERS_APPNAME`
+    - Added `REGISTRAR_CELERY_WORKERS`
+  - Role: registrar
+    - Added `REGISTRAR_CELERY_BROKER_TRANSPORT`.
+    - Added `REGISTRAR_CELERY_BROKER_USER`.
+    - Added `REGISTRAR_CELERY_BROKER_PASSWORD`.
+    - Added `REGISTRAR_CELERY_BROKER_HOSTNAME`.
+    - Added `REGISTRAR_CELERY_BROKER_VHOST`.
+    - Injected all of the above settings as environment variables for Registrar.
+  - Role: registrar
+    - Added `REGISTRAR_API_ROOT`
+    - Modified `REGISTRAR_MEDIA_URL`.
+  - Role: edx_django_service
+    - Added new overridable variable `edx_django_service_api_root`
+  - Role: registrar
+    - Replaced `REGISTRAR_MEDIA_ROOT`.
+    - Added `REGISTRAR_MEDIA_STORAGE_BACKEND`.
+  - Role: registrar
+    - Replaced `REGISTRAR_LMS_URL_ROOT` with `REGISTRAR_LMS_BASE_URL`.
+    - Replaced `REGISTRAR_DISCOVERY_API_URL` with `REGISTRAR_DISCOVERY_BASE_URL`.
+  - Role: registrar
+    - Added `REGISTRAR_SEGMENT_KEY` for segment.io event tracking.
+  - Role: registrar
+    - Added `REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY` for oauth2.
+    - Added `REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET` for oauth2.
+    - Added `REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY` for backend auth.
+    - Added `REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET` for backend auth.
+    - Added `REGISTRAR_SERVICE_USER_EMAIL` to have a registrar service user on the LMS
+    - Added `REGISTRAR_SERVICE_USER_NAME` to have a registrar service user on the LMS
+  - Role: registrar
+    - Create role
+
+- 2019-06-12
+  - Role: oauth_client_setup
+    - Ensure that created DOT applications have corresponding ApplicationAccess records with user_id scope.
+  - Role: edx_notes_api
+    - Added `EDX_NOTES_API_HOSTNAME` to set a hostname for the edx-notes-api IDA.
+  - Open edX
+    - Added `SANDBOX_ENABLE_NOTES` to enable/disable setting up the edx-notes-api IDA.
+
+- 2019-06-05
+  - Role: registrar
+    - Change the default celery queue to `registrar.default`; explicitly set the default exchange and routing key.
+
+- 2019-05-24
+  - Role: xserver
+    - Remove xserver from sandbox builds.
+  - Role: registrar
+    - Add registrar to sandbox builds.
+
+- 2019-05-10
+  - Role: edxapp
+    - Added ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS to allow edx-specific query params to be added to the business marketing footer.
+
+- 2019-05-09
+  - Role: designer
+    - Create role
+
+- 2019-04-16
+  - Role: edxapp
+    - Removed the OfficeMix XBlock (the service that it uses has been dead for months).
+
+- 2019-03-28
+  - Role: edxapp
+    - Added 'SYSTEM_WIDE_ROLE_CLASSES' for use of edx-rbac roles in the jwt in the lms
+
+- 2019-02-20
+  - Open edX
+    - Renamed edx_sandbox.yml to openedx_native.yml
+  - Role: nginx
+    - Added a CORS Access-Control-Allow-Origin header for static assets.
+    - Replaced the wildcard Access-Control-Allow-Origin header for fonts. Make sure you set EDXAPP_CORS_ORIGIN_WHITELIST to include all your domains.
+
+- 2019-02-14
+  - Role: ecomworker
+    - Added an `assignment_email` default template value in the `SAILTHRU` config to send offer assignment emails.
+
+- 2019-02-11
+  - Role: ecommerce
+    - Added CORS_ORIGIN_WHITELIST and CORS_URLS_REGEX to allow selective CORS whitelisting of origins/urls.
+    - Removed unused JWT_SECRET_KEYS.
+    - Transformed the JWT_ISSUERS to match the format expected by the edx-drf-extensions jwt_decode_handler.
+
+- 2019-02-05
+  - Role: common_vars
+    - Added new overridable variable `COMMON_LMS_BASE_URL`.
+
+- 2019-01-18
+  - Role: discovery
+    - Added `DISCOVERY_CORS_ORIGIN_WHITELIST` to allow CORS whitelisting of origins.
+
+- 2019-01-14
+  - Role: nginx
+    - Modified robots.txt.j2 to accept the Allow rule.
+    - Modified robots.txt.j2 to accept either a single string or a list of strings for agent, disallow, and allow.
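A sketch of what the 2019-01-14 robots.txt change enables, assuming the rules variable consumed by robots.txt.j2 follows the agent/disallow/allow structure described in the entry (the variable name `NGINX_ROBOT_RULES` is an assumption here, and the paths are illustrative):

```yaml
# Hypothetical robots rules; agent, disallow, and allow each accept a string or a list.
NGINX_ROBOT_RULES:
  - agent: '*'
    disallow: ['/login', '/register']
    allow: '/courses'
```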
+
+- 2019-01-09
+  - abbey.py
+    - Removed abbey.py
+
+- 2019-01-03
+  - Render auth and env config to a single yml file
+
+- 2019-01-02
+  - Role: edxapp
+    - Renamed the proctoring backend setting to work with edx-proctoring 1.5.0
+
+- 2018-11-20
+  - Role: edxapp
+    - Remove the low priority queue; use the default instead.
+
+- 2018-11-14
+  - Role: ecommerce
+
+- 2018-11-07
+  - Role: ecommerce
+
+- 2018-11-05
+  - Role: edxapp
+    - Added `ENTERPRISE_CUSTOMER_SUCCESS_EMAIL` to lms_env_config for configuring emails to the customer success team.
+
+- 2018-10-31
+  - Role: edx_django_service
+    - Added new overridable variable `edx_django_service_gunicorn_max_requests`
+  - Role: ecommerce
+    - Set default max_requests to 3000 (e.g. restart a gunicorn process after every 3000 requests).
+
+- 2018-10-03
+  - Role: edx_notes_api
+    - Added `JWT_AUTH` to edx-notes-api, as is used in other IDAs.
+
+- 2018-10-01
+  - Role: edxapp
+    - Removed `PASSWORD_MIN_LENGTH`, `PASSWORD_MAX_LENGTH`, and `PASSWORD_COMPLEXITY` in favor of specifying these in `AUTH_PASSWORD_VALIDATORS`.
+  - Role: edxapp
+    - Added `AUTH_PASSWORD_VALIDATORS` to utilize Django's password validation. Base validators included in the configuration are UserAttributeSimilarity, to test the password against the username and email using the default similarity threshold of 0.7 (1.0 fails exact matches only); MinimumLength, to test password minimum length; and MaximumLength, to test password maximum length.
+
+- 2018-09-29
+  - Role: edxapp
+    - Added `EDXAPP_LOGIN_REDIRECT_WHITELIST`, which provides a whitelist of domains to which the login/logout pages will redirect.
+
+- 2018-09-17
+  - Role: edxapp
+    - `EDXAPP_EDXAPP_SECRET_KEY` no longer has a default value
+
+- 2018-08-30
+  - Role: edxapp
+    - `EDXAPP_CACHE_BACKEND` added to allow overriding Django's memcache backend
+
+- 2018-08-28
+  - Role: prospectus
+    - New role added to configure the prospectus service
+
+- 2018-08-14
+  - Removed the obsolete install_stack.sh file (the last reference to fullstack)
+
+- 2018-08-07
+  - Role: analytics_api
+    - Added `basic_auth_exempted_paths` configuration for enterprise api endpoints
+
+- 2018-08-06
+  - Role: edx_django_service
+    - Added an optional `edx_django_service_allow_cors_headers` boolean option to pass CORS headers (`Access-Control-Allow-Origin` and `Access-Control-Allow-Methods`) on non basic-auth
+      calls to support `/api` endpoints for analytics_api.
+
+- 2018-08-03
+  - Role: edxapp
+    - `EDXAPP_X_FRAME_OPTIONS` added in studio to prevent clickjacking.
+
+- 2018-08-02
+  - Role: analytics_api
+    - Added `ANALYTICS_API_CORS_ORIGIN_WHITELIST` to allow CORS whitelisting of origins.
+
+- 2018-07-31
+  - Role: nginx
+    - Added `NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS` to be able to use custom static error pages for error responses from the LMS.
+    - Added `NGINX_SERVER_HTML_FILES_TEMPLATE` to make the error file template configurable.
+    - Added `NGINX_SERVER_STATIC_FILES` to allow copying static contents to the server static folder. Can be used to deploy static contents for the error pages, for example.
+  - Role: edxapp
+    - Added `EDXAPP_X_FRAME_OPTIONS` to prevent clickjacking in the LMS.
+
+- 2018-07-11
+  - sandbox.sh has been renamed native.sh to better indicate what it does.
+
+- 2018-07-10
+  - git_clone:
+    - The working tree is explicitly checked for modified files, to prevent mysterious failures.
+
+- 2018-07-05
+  - Installation
+    - OPENEDX_RELEASE is now required, to prevent accidental installation of master.
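The two `EDXAPP_X_FRAME_OPTIONS` entries above (2018-08-03 for Studio, 2018-07-31 for the LMS) reduce to a one-line override; `DENY` is a standard value for the X-Frame-Options header, shown here for illustration rather than as the role's default:

```yaml
# Illustrative: forbid framing of LMS and Studio pages entirely.
EDXAPP_X_FRAME_OPTIONS: 'DENY'
```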
+
+- 2018-06-21
+  - XQueue
+    - Expose CLOUDWATCH_QUEUE_COUNT_METRIC, which is defined in XQueue's settings.py; see that file for the full dictionary structure.
+
+- 2018-06-12
+  - Role: edxapp
+    - Create EDXAPP_CMS_GUNICORN_TIMEOUT and EDXAPP_LMS_GUNICORN_TIMEOUT to allow overriding of the gunicorn timeout
+
+- 2018-06-11
+  - nginx:
+    - Remove nginx_cfg - an internal variable that was really only used for the edx-release nginx site, which served version.{html,json} off of a nonstandard port. The file it served was never populated.
+
+- 2018-06-07
+  - Structure: edx-east
+    - Deprecated the edx-east folder; playbooks now live in the top-level directory instead of edx-east/playbooks. A symbolic link was added for now, but should not be relied upon.
+  - EDXAPP_LMS_STATIC_URL_BASE and EDXAPP_CMS_STATIC_URL_BASE allow a per-application setting of the static URL. You can still use EDXAPP_STATIC_URL_BASE for now, but we may retire that as we continue to separate LMS and CMS.
+
+- 2018-06-06
+  - Role: edxapp
+    - EDXAPP_NGINX_SKIP_ENABLE_SITES added to allow you to not sync in the lms or cms nginx configuration. Instead you can enable them during deployment.
+    - EDXAPP_NGINX_DEFAULT_SITES added to allow you to mark both lms and cms as defaults; best paired with picking which site to enable during deployment.
+
+- 2018-05-11
+  - XQUEUE_SETTINGS now prefers production.py over aws_settings.py
+
+- 2018-05-09
+  - Role: credentials
+    - Set `LANGUAGE_COOKIE_NAME` so that Credentials will use the global language cookie.
+
+- 2018-05-08
+  - Role: edxapp
+    - Added `PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG` to make configurable whether password complexity is checked on login and how such complexity is rolled out to users.
+
+- 2018-05-03
+  - Role: XQueue
+    - Convert to a yaml config (instead of xqueue.auth.json and xqueue.env.json we get xqueue.yml, and it lives by default in /edx/etc/xqueue.yml like standard IDAs)
+    - Add XQUEUE_DEFAULT_FILE_STORAGE so that you can specify S3 or Swift in your config
+
+- 2018-04-25
+  - Role: edxapp
+    - Added `RETIREMENT_STATES` to generic_env_config to support making the retirement workflow configurable.
+
+- 2018-04-19
+  - Removed the Vagrantfiles for devstack and fullstack, and supporting files.
+  - Role: xqueue
+    - Added XQUEUE_SUBMISSION_PROCESSING_DELAY and XQUEUE_CONSUMER_DELAY to the xqueue env so they can be passed along to the app.
+
+- 2018-04-13
+  - Role: edxapp
+    - Added GOOGLE_SITE_VERIFICATION_ID to move a previously hardcoded value into configuration.
+    - Changed `EDXAPP_RETIRED_USERNAME_FMT` to `EDXAPP_RETIRED_USERNAME_PREFIX`. Changed/split `EDXAPP_RETIRED_EMAIL_FMT` into `EDXAPP_RETIRED_EMAIL_PREFIX` and `EDXAPP_RETIRED_EMAIL_DOMAIN`.
+    - Added `EDXAPP_RETIRED_USERNAME_FMT`, `EDXAPP_RETIRED_EMAIL_FMT`, `EDXAPP_RETIRED_USER_SALTS`, and
+      `EDXAPP_RETIREMENT_SERVICE_USER_NAME` to generic_env_config to allow user retirement to be configurable.
+
+- 2018-04-12
+  - Retired XQUEUE_WORKERS_PER_QUEUE
+
+- 2018-04-11
+  - Role: edxapp
+    - Moved `PASSWORD_MIN_LENGTH`, `PASSWORD_MAX_LENGTH`, and `PASSWORD_COMPLEXITY` to generic_env_config to allow CMS and LMS to share these configurations
+
+- 2018-04-09
+  - Added XQUEUE_CONSUMER_NEWRELIC_APPNAME, which is added to the supervisor start of xqueue_consumer
+    if you have New Relic enabled.
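A sketch combining several of the XQueue variables named above (the 2018-05-03 and 2018-04-19 entries); the storage class and delay values are illustrative assumptions, not defaults from this repo:

```yaml
# Hypothetical overrides rendered into /edx/etc/xqueue.yml per the 2018-05-03 entry.
XQUEUE_DEFAULT_FILE_STORAGE: 'storages.backends.s3boto.S3BotoStorage'  # S3; Swift is also supported
XQUEUE_SUBMISSION_PROCESSING_DELAY: 60  # illustrative value
XQUEUE_CONSUMER_DELAY: 10               # illustrative value
```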
+
+- 2018-04-04
+  - Role: xqueue
+    - Removed RabbitMQ support; with earlier changes in XQueue itself, we don't need any of the configuration:
+      XQUEUE_RABBITMQ_USER, XQUEUE_RABBITMQ_PASS, XQUEUE_RABBITMQ_VHOST, XQUEUE_RABBITMQ_HOSTNAME,
+      XQUEUE_RABBITMQ_PORT, XQUEUE_RABBITMQ_TLS
+
+- 2018-04-02
+  - Added NEWRELIC_APPNAME and NEWRELIC_LICENSE_KEY to the configuration files consumed by XQueue.
+    Useful for external utilities that are reporting NR metrics.
+  - Role: edxapp
+
+- 2018-03-28
+  - Role: xqueue
+    - Added XQUEUE_MYSQL_CONN_MAX_AGE so that you can have xqueue use django's persistent DB connections
+
+- 2018-03-22
+  - Role: edx_django_service
+    - Added a maintenance page under the flag EDX_DJANGO_SERVICE_ENABLE_S3_MAINTENANCE.
+    - Added the s3_maintenance.j2 file to point to the s3 maintenance page.
+
+- 2018-03-20
+  - Role: splunkforwarder
+    - Updated the role so the splunkforwarder can be installed in an Amazon Linux OS environment, which is a RHEL variant
+  - Role: server_utils
+    - Updated to only do things in Debian-variant environments
+
+- 2018-03-08
+  - Role: edxapp
+    - Added an empty `EDXAPP_PASSWORD_COMPLEXITY` setting to ease overriding complexity.
+
+- 2018-02-27
+  - The manage_users management command is only run when disable_edx_services is false (previously this play would try
+    to update databases while building images, where services are generally disabled).
+
+- 2018-02-22
+  - Role: xqueue
+    - Remove S3_BUCKET and S3_PATH_PREFIX - they were deprecated prior to ginkgo
+    - Remove SERVICE_VARIANT - it was copied from edxapp but never truly used (except to complicate things)
+
+- 2018-02-09
+  - Role: certs
+    - Added `CERTS_QUEUE_POLL_FREQUENCY` to make the certificate agent's queue polling frequency configurable.
+
+- 2018-02-02
+  - Role: xqueue
+    - Added `XQUEUE_SESSION_ENGINE` to allow a configurable xqueue session engine.
+    - Added `XQUEUE_CACHES` to allow a configurable xqueue cache.
+
+- 2018-01-31
+  - Role: devpi
+    - New role added to configure a devpi service as a pass-through cache for PyPI.
+  - Role: devpi_consumer
+    - Added role to configure Python containers to use devpi for Docker Devstack
+
+- 2018-01-26
+  - Role: edxapp
+    - Added `ENTERPRISE_REPORTING_SECRET` to CMS auth settings to allow edx-enterprise migrations to run.
+
+- 2018-01-25
+  - Role: edxapp
+    - Added `EDXAPP_FERNET_KEYS` to allow for the use of django-fernet-keys in the LMS.
+
+- 2018-01-04
+  - Role: nginx
+    - Added `NGINX_EDXAPP_DEFAULT_SITE_THEME` to allow completely
+      overriding the `favicon.ico` file when a Comprehensive Theme is enabled.
+
+- 2017-12-14
+  - Role: edxapp
+    - Added `EDX_PLATFORM_REVISION` (set from `edx_platform_version`). This is for
+      edx-platform debugging purposes, and replaces calling dealer.git at startup.
+
+- 2017-12-07
+  - Role: edxapp
+    - Added `EDXAPP_BRANCH_IO_KEY` to configure branch.io journey app banners.
+
+- 2017-12-06
+  - Role: veda_pipeline_worker
+    - New role to run all (`deliver, ingest, youtubecallback`) [video pipeline workers](https://github.com/edx/edx-video-pipeline/blob/master/bin/)
+  - Role: ecomworker
+    - Added `ECOMMERCE_WORKER_BROKER_TRANSPORT` with a default value of 'amqp' to be backwards compatible with rabbit. Set to 'redis' if you wish to use redis instead of rabbit as a queue for the ecommerce worker.
+
+- 2017-12-05
+  - Role: ecommerce
+    - Added `ECOMMERCE_BROKER_TRANSPORT` with a default value of 'amqp' to be backwards compatible with rabbit. Set to 'redis' if you wish to use redis instead of rabbit as a queue for ecommerce.
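Both broker-transport entries above amount to one-line overrides; per the entries, 'redis' is the supported alternative to the default 'amqp':

```yaml
# Switch the ecommerce service and its worker from RabbitMQ to Redis.
ECOMMERCE_BROKER_TRANSPORT: 'redis'
ECOMMERCE_WORKER_BROKER_TRANSPORT: 'redis'
```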
+
+- 2017-12-01
+  - Role: credentials
+    - This role is now dependent on the edx_django_service role. Settings are all the same, but nearly all of the tasks are performed by the edx_django_service role.
+
+- 2017-11-29
+  - Role: veda_delivery_worker
+    - New role added to run the [video delivery worker](https://github.com/edx/edx-video-pipeline/blob/master/bin/deliver)
+
+- 2017-11-23
+  - Added the `EDXAPP_DEFAULT_COURSE_VISIBILITY_IN_CATALOG` setting (defaults to `both`).
+  - Added the `EDXAPP_DEFAULT_MOBILE_AVAILABLE` setting (defaults to `false`).
+
+- 2017-11-21
+  - Role: veda_ffmpeg
+    - New role added to compile ffmpeg for the video pipeline. It will be used as a dependency for video pipeline roles.
+
+- 2017-11-15
+  - Role: nginx
+    - Modified `lms.j2`, `cms.j2`, `credentials.j2`, `edx_notes_api.j2` and `insights.j2` to enable HTTP Strict Transport Security
+    - Added `NGINX_HSTS_MAX_AGE` to make the HSTS header `max_age` value configurable and used in templates
+
+- 2017-11-14
+  - Role: edxapp
+    - Added `EDXAPP_MONGO_REPLICA_SET`, which is required to use
+      pymongo.MongoReplicaSetClient in PyMongo 2.9.1. This should be set to the
+      name of your replica set.
+      This setting causes the `EDXAPP_*_READ_PREFERENCE` settings below to be used.
+    - Added `EDXAPP_MONGO_CMS_READ_PREFERENCE` with a default value of `PRIMARY`.
+    - Added `EDXAPP_MONGO_LMS_READ_PREFERENCE` with a default value of
+      `SECONDARY_PREFERED` to distribute the read workload across the replica set
+      for replicated docstores and contentstores.
+    - Added `EDXAPP_LMS_SPLIT_DOC_STORE_READ_PREFERENCE` with a default value of
+      `EDXAPP_MONGO_LMS_READ_PREFERENCE`.
+    - Added `EDXAPP_LMS_DRAFT_DOC_STORE_CONFIG` with a default value of
+      `EDXAPP_MONGO_CMS_READ_PREFERENCE`, to enforce consistency between
+      Studio and the LMS Preview modes.
+    - Removed `EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS`, since there is no notion of
+      common options to the content store anymore.
+
+- 2017-11-13
+  - Role: edxapp
+    - Added `EDXAPP_ZENDESK_OAUTH_ACCESS_TOKEN` for making requests to Zendesk through the front-end.
+
+- 2017-11-09
+  - Role: edxapp
+    - Added the `EDXAPP_LMS_INTERNAL_ROOT_URL` setting (defaults to `EDXAPP_LMS_ROOT_URL`).
+
+- 2017-11-07
+  - Role: edxapp
+    - Added `EDXAPP_CELERY_BROKER_TRANSPORT` and renamed `EDXAPP_RABBIT_HOSTNAME`
+      to `EDXAPP_CELERY_BROKER_HOSTNAME`. This is to support non-amqp brokers,
+      specifically redis. If `EDXAPP_CELERY_BROKER_HOSTNAME` is unset, it will use
+      the value of `EDXAPP_RABBIT_HOSTNAME`; however, it is recommended to update
+      your configuration to set `EDXAPP_CELERY_BROKER_TRANSPORT` explicitly.
+
+- 2017-11-03
+  - Role: server_utils
+    - Install "vim", not "vim-tiny".
+  - Role: edxapp
+    - Added the GOOGLE_ANALYTICS_TRACKING_ID setting for inserting GA tracking into emails generated via ACE.
+
+- 2017-10-30
+  - Role: edxapp
+    - Added `EDXAPP_REINDEX_ALL_COURSES` to rebuild the course index on deploy. Disabled by default.
+
+- 2017-10-26
+  - Role: ecommerce
+    - This role is now dependent on the edx_django_service role. Settings are all the same, but nearly all of the tasks are performed by the edx_django_service role.
+
+- 2017-10-24
+  - Role: notifier
+    - Added notifier back to continuous integration.
+
+- 2017-10-19
+  - Role: veda_web_frontend
+    - New role added for [edx-video-pipeline](https://github.com/edx/edx-video-pipeline)
+
+- 2017-10-07
+  - Role: discovery
+    - Added `DISCOVERY_REPOS` to allow configuring discovery repository details.
+  - Role: edx_django_service
+    - Made the keys `edx_django_service_git_protocol`, `edx_django_service_git_domain`, and `edx_django_service_git_path` of `edx_django_service_repos` all individually configurable.
+
+- 2017-10-05
+  - Role: whitelabel
+    - Added `WHITELABEL_THEME_DIR` to point to the location of whitelabel themes.
+    - Added `WHITELABEL_ADMIN_USER` to specify an admin user.
+    - Added `WHITELABEL_DNS` for DNS settings of themes.
+    - Added `WHITELABEL_ORG` for whitelabel organization settings.
+
+- 2017-09-26
+  - Role: edxapp
+    - Added `EDXAPP_EXTRA_MIDDLEWARE_CLASSES` for configuring additional middleware logic.
+
+- 2017-09-25
+  - Role: discovery
+    - Updated LANGUAGE_CODE to generic English. Added configuration for the multilingual language package django-parler.
+
+- 2017-09-14
+  - Role: edxapp
+    - Added `EDXAPP_SCORM_PKG_STORAGE_DIR`, with the default value as it was in the server template.
+    - Added `EDXAPP_SCORM_PLAYER_LOCAL_STORAGE_ROOT`, with the default value as it was in the server template.
+  - Role: edxapp
+    - Added the `ENTERPRISE_SUPPORT_URL` variable used by the LMS.
+
+- 2017-09-13
+  - Role: discovery
+    - Added `OPENEXCHANGERATES_API_KEY` for retrieving currency exchange rates.
+
+- 2017-09-12
+  - Added `EDXAPP_PLATFORM_DESCRIPTION`, used to describe the specific Open edX platform.
+
+- 2017-09-11
+  - Role: edxapp
+    - Added `EDXAPP_ENTERPRISE_TAGLINE` for customized header taglines for different enterprises.
+
+- 2017-09-05
+  - Role: edxapp
+    - Added OAUTH_DELETE_EXPIRED to enable automatic deletion of edx-django-oauth2-provider grants, access tokens, and refresh tokens as they are consumed. This will not do a bulk delete of existing rows.
+
+- 2017-08-23
+  - Role: mongo_3_2
+    - Added role for mongo 3.2, not yet in use.
+    - Removed the MONGO_CLUSTERED variable. In this role, mongo replication is always configured, even if there is only one node.
+
+- 2017-08-16
+  - Removed the unused `EDXAPP_BOOK_URL` setting
+
+- 2017-08-08
+  - Role: credentials
+    - Replaced `CREDENTIALS_OAUTH_URL_ROOT` with `COMMON_OAUTH_URL_ROOT` from `common_vars`
+    - Replaced `CREDENTIALS_OIDC_LOGOUT_URL` with `COMMON_OAUTH_LOGOUT_URL` from `common_vars`
+    - Replaced `CREDENTIALS_JWT_AUDIENCE` with `COMMON_JWT_AUDIENCE` from `common_vars`
+    - Replaced `CREDENTIALS_JWT_ISSUER` with `COMMON_JWT_ISSUER` from `common_vars`
+    - Replaced `CREDENTIALS_JWT_SECRET_KEY` with `COMMON_JWT_SECRET_KEY` from `common_vars`
+    - Replaced `CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_ISSUER` with `COMMON_JWT_ISSUER` from `common_vars`
+  - Role: ecommerce
+    - Replaced `ECOMMERCE_OAUTH_URL_ROOT` with `COMMON_OAUTH_URL_ROOT` from `common_vars`
+    - Replaced `ECOMMERCE_OIDC_LOGOUT_URL` with `COMMON_OAUTH_LOGOUT_URL` from `common_vars`
+    - Replaced `ECOMMERCE_JWT_SECRET_KEY` with `COMMON_JWT_SECRET_KEY` from `common_vars`
+    - Replaced `ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_ISSUER` with `COMMON_JWT_ISSUER` from `common_vars`
+
+- 2017-08-04
+  - Role: edxapp
+    - Added `PASSWORD_MIN_LENGTH` for password minimum length validation on the reset page.
+    - Added `PASSWORD_MAX_LENGTH` for password maximum length validation on the reset page.
+
+- 2017-08-03
+  - Role: edxapp
+    - Added `EDXAPP_VIDEO_TRANSCRIPTS_SETTINGS` to configure S3-backed video transcripts.
+
+- 2017-07-28
+  - Role: edxapp
+    - Added creation of the enterprise_worker user to provisioning. This user is used by the edx-enterprise package when making API requests to Open edX IDAs.
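The 2017-08-04 entry above maps to two scalar overrides; the numbers here are illustrative, not the role's defaults:

```yaml
# Illustrative password-length validation for the reset page.
PASSWORD_MIN_LENGTH: 8
PASSWORD_MAX_LENGTH: 75
```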
+
+- 2017-07-25
+  - Role: neo4j
+    - Increase heap and page cache sizes for neo4j
+
+- 2017-07-21
+  - Role: edxapp
+    - Remove EDXAPP_ANALYTICS_API_KEY, EDXAPP_ANALYTICS_SERVER_URL, EDXAPP_ANALYTICS_DATA_TOKEN, EDXAPP_ANALYTICS_DATA_URL since they are old and
+      no longer consumed.
+
+- 2017-07-18
+  - Role: insights
+    - Moved `THEME_SCSS` from `INSIGHTS_CONFIG` to `insights_environment`
+
+- 2017-07-14
+  - Role: forum
+    - Added `FORUM_REBUILD_INDEX` to rebuild the ElasticSearch index from the database, when enabled. Default: `False`.
+
+- 2017-07-13
+  - Role: insights
+    - Removed the `bower install` task
+    - Replaced the r.js build task with a webpack build task
+    - Removed the `./manage.py compress` task
+  - Role: analytics_api
+    - Added a number of `ANALYTICS_API_DEFAULT_*` and `ANALYTICS_API_REPORTS_*` variables to allow more selective specification of database parameters (rather than
+      overriding the whole structure).
+
+- 2017-07-06
+  - Removed the authentication requirement for neo4j
+
+- 2017-06-30
+  - Role: insights
+    - Added `INSIGHTS_DOMAIN` to configure the domain Insights is deployed on
+    - Added `INSIGHTS_CLOUDFRONT_DOMAIN` to configure the domain static files can be served from
+    - Added `INSIGHTS_CORS_ORIGIN_WHITELIST_EXTRA` to configure allowing CORS on domains other than the `INSIGHTS_DOMAIN`
+
+- 2017-06-28
+  - Role: edxapp
+    - Let `confirm_email` in `EDXAPP_REGISTRATION_EXTRA_FIELDS` default to `"hidden"`.
+    - Let `terms_of_service` in `EDXAPP_REGISTRATION_EXTRA_FIELDS` default to `"hidden"`.
+
+- 2017-06-27
+  - Role: ecommerce
+    - Added ECOMMERCE_LANGUAGE_COOKIE_NAME, which is the name of the cookie the ecommerce django app looks at for determining the language preference.
+
+- 2017-06-26
+  - Role: neo4j
+    - Enabled splunk forwarding for neo4j logs.
+    - Increased the maximum number of open files to 40000, as suggested by neo4j.
+    - Updated the java build that neo4j uses to run.
+
+- 2017-06-22
+  - Role: edxapp
+    - Added `EDXAPP_BASE_COOKIE_DOMAIN` for sharing cookies across edx domains.
+
+- 2017-06-21
+  - Role: edxapp
+    - Set the default value for EDXAPP_POLICY_CHANGE_GRADES_ROUTING_KEY to
+      'edx.lms.core.default'.
+  - Role: edxapp
+    - Set the default value for EDXAPP_BULK_EMAIL_ROUTING_KEY_SMALL_JOBS to
+      'edx.lms.core.low'.
+
+- 2017-06-16
+  - Role: neo4j
+    - Updated neo4j to 3.2.2
+
+- 2017-06-15
+  - Role: jenkins_master
+    - Updated the pinned use of JDK7 in Jenkins installs to the default JDK version from the `oraclejdk` role.
+
+- 2017-06-12
+  - Role: elasticsearch
+    - Replaced `elasticsearch_apt_key` and `elastic_search_apt_keyserver` with `elasticsearch_apt_key_url`
+    - Updated the elasticsearch version to 1.5.0
+
+- 2017-06-08
+  - Role: edxapp
+    - Set the EDXAPP_IMPORT_EXPORT_BUCKET setting to an empty string
+
+- 2017-06-07
+  - Role: edxapp
+    - Updated the default value of the EDXAPP_ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES setting to ["audit", "honor"]
+  - Role: edx_notes_api
+    - Removed EDX_NOTES_API_ELASTICSEARCH_HOST.
+    - Removed EDX_NOTES_API_ELASTICSEARCH_PORT.
+    - Added EDX_NOTES_API_ELASTICSEARCH_URL.
+  - Role: insights
+    - Removed the `SUPPORT_EMAIL` setting from `INSIGHTS_CONFIG`, as it was replaced by `SUPPORT_URL`.
+
+- 2017-06-05
+  - Role: insights
+    - Removed `INSIGHTS_FEEDBACK_EMAIL`, which is no longer used, as it was deemed redundant with `INSIGHTS_SUPPORT_EMAIL`.
+
+- 2017-06-01
+  - Role: nginx
+    - Modified `server-template.j2` to be more accessible and configurable.
+      - The template should contain the `lang` attribute in the HTML tag.
+    - If the loaded image has meaning, such as a logo, it should have the `alt` attribute.
+    - There is no relevant text content after the header 1 (h1), so the next element cannot be another header (h2); it was changed to a paragraph styled with the header 2 CSS.
+    - Added `NGINX_SERVER_ERROR_IMG_ALT` with default value as it was in the server template
+    - Added `NGINX_SERVER_ERROR_LANG` with default value `en`
+    - Added `NGINX_SERVER_ERROR_STYLE_H1` with default value as it was in the server template
+    - Added `NGINX_SERVER_ERROR_STYLE_P_H2` with default value as it was in the server template
+    - Added `NGINX_SERVER_ERROR_STYLE_P` with default value as it was in the server template
+    - Added `NGINX_SERVER_ERROR_STYLE_DIV` with default value as it was in the server template
+
+- 2017-05-31
+  - Role: edxapp
+    - Install development.txt in Vagrant and Docker devstacks
+
+- 2017-05-26
+  - Role: edxapp
+    - Added the EDXAPP_ACTIVATION_EMAIL_SUPPORT_LINK URL with default value `''`.
+    - Added the EDXAPP_PASSWORD_RESET_SUPPORT_LINK URL with default value `''`.
+
+- 2017-05-23
+  - Role: edxapp
+    - Added the EDXAPP_SHOW_HEADER_LANGUAGE_SELECTOR feature flag with default value [false]
+    - Added the EDXAPP_SHOW_FOOTER_LANGUAGE_SELECTOR feature flag with default value [false]
+
+  - Role: edxapp
+    - Added the EDXAPP_ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES setting with default value ["audit"]
+
+- 2017-05-15
+  - Role: nginx
+    - Added `NGINX_EDXAPP_CMS_APP_EXTRA`, which makes it possible to add custom settings to the site configuration for Studio.
+    - Added `NGINX_EDXAPP_LMS_APP_EXTRA`, which makes it possible to add custom settings to the site configuration for the LMS.
+
+- 2017-05-04
+
+  - Role: edxapp
+    - Added `EDXAPP_VIDEO_IMAGE_SETTINGS` to configure S3-backed video images.
+
+- 2017-04-24
+  - Role: edxapp
+    - The DOC_LINK_BASE settings have been removed, replaced by HELP_TOKENS_BOOKS
+
+  - Role: edxapp
+    - Add the EDXAPP_LANGUAGE_COOKIE setting
+
+- 2017-04-12
+  - Added a new EDXAPP_MYSQL_CONN_MAX_AGE, defaulting to 0. Adjust it to change how long a connection is kept open for reuse before it is closed.
+
+- 2017-04-11
+  - Role: rabbitmq
+    - Upgraded to 3.6.9
+    - Switched to a PPA rather than a .deb hosted in S3
+    - Note that you generally cannot upgrade RabbitMQ live in place (https://www.rabbitmq.com/clustering.html); this is particularly true coming from 3.2 to 3.6. We are using the shovel plugin to move tasks across clusters, but the RabbitMQ documentation covers other scenarios as well.
+
+- 2017-03-31
+  - Role: edxapp
+    - Set preload_app to False in the gunicorn config for LMS and Studio.
+
+- 2017-03-13
+
+  - Role: edxapp
+    - Added `EDXAPP_BLOCK_STRUCTURES_SETTINGS` to configure S3-backed Course Block Structures.
+
+- 2017-03-07
+  - Role: analytics_api
+    - Added `ANALYTICS_API_AGGREGATE_PAGE_SIZE`, default value 10. Adjust this parameter to increase the number of aggregate search results returned by the Analytics API, i.e. in course_metadata: enrollment_modes, cohorts, and segments.
+
+- 2017-02-27
+  - Role: xqueue
+    - Added `XQUEUE_RABBITMQ_TLS` to allow configuring xqueue to use TLS when connecting to the AMQP broker, and changed its default from `true` to `false`.
+    - Added `XQUEUE_RABBITMQ_VHOST` to allow configuring the xqueue RabbitMQ virtual host.
+    - Added `XQUEUE_RABBITMQ_PORT` to allow configuring the RabbitMQ port.
+    - Added `EDXAPP_CELERY_BROKER_USE_SSL` to allow configuring celery to use TLS.
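+
+  For the 2017-02-27 xqueue entry above, a hedged sketch of a TLS-enabled
+  broker override (the variable names are from the entry; the port and vhost
+  values are illustrative assumptions, not role defaults):
+
+  ```
+  XQUEUE_RABBITMQ_TLS: true           # role default is false, per the entry above
+  XQUEUE_RABBITMQ_PORT: 5671          # assumed TLS port, for illustration only
+  XQUEUE_RABBITMQ_VHOST: '/xqueue'    # assumed vhost name, for illustration only
+  EDXAPP_CELERY_BROKER_USE_SSL: true  # have edxapp's celery connect over TLS too
+  ```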
+
+- 2017-02-24
+  - Role: programs
+    - This role has been removed as this service is no longer supported. The role is still available on the [Ficus branch](https://github.com/edx/configuration/releases/tag/open-release%2Fficus.1).
+
+- 2017-02-16
+  - Role: mongo_2_6
+    - Added `MONGO_AUTH` to turn authentication on/off. Auth is now enabled by default; it was previously disabled by default.
+
+- 2017-02-14
+  - Role: notifier
+    - Added `NOTIFIER_DATABASE_ENGINE`, `NOTIFIER_DATABASE_NAME`, `NOTIFIER_DATABASE_USER`, `NOTIFIER_DATABASE_PASSWORD`, `NOTIFIER_DATABASE_HOST`, and `NOTIFIER_DATABASE_PORT` to be able to configure the `notifier` service to use a database engine other than sqlite. Defaults to local sqlite.
+    - Deprecated: `NOTIFIER_DB_DIR`: Please use `NOTIFIER_DATABASE_NAME` instead.
+
+- 2017-02-02
+  - Support parsing the replset JSON in MongoDB 3.2 and 3.0
+
+  - Role: ecommerce
+    - Removed `SEGMENT_KEY`, which is no longer used. The Segment key is now defined in the DB configuration. (https://github.com/edx/ecommerce/pull/1121)
+
+- 2017-02-01
+
+  - Role: ecommerce
+    - Added `ECOMMERCE_ENTERPRISE_URL` for the `enterprise` API endpoint exposed by a new service `edx-enterprise` (currently hosted by `LMS`), which defaults to the existing setting `ECOMMERCE_LMS_URL_ROOT`.
+
+- 2017-01-12
+  - Role: credentials
+    - Added `CREDENTIALS_EXTRA_APPS` to enable the inclusion of additional Django apps in the Credentials Service.
+
+- 2017-01-10
+  - Added `COMMON_EDXAPP_SETTINGS`. Default: `aws`
+
+- 2016-11-18
+
+  - Role: mongo_3_0
+    - Changed MONGO_STORAGE_ENGINE to default to wiredTiger, which is the default in 3.2 and 3.4 and what edX suggests using even on 3.0. If you have an mmapv1 3.0 install, override MONGO_STORAGE_ENGINE to mmapv1, which was the old default.
+
+- 2016-11-03
+
+  - Role: xqueue
+
+  - Role: edxapp
+
+- 2016-10-27
+
+  - Role: security
+    - Changed SECURITY_UPGRADE_ON_ANSIBLE to only apply security updates. If you want to retain the behavior of running safe-upgrade, you should switch to using SAFE_UPGRADE_ON_ANSIBLE.
+
+- 2016-10-24
+
+  - Role: discovery
+    - Added `PUBLISHER_FROM_EMAIL` for sending emails to publisher app users.
+
+- 2016-10-18
+
+  - Role: edxapp
+    - Added `EXPIRING_SOON_WINDOW` to show a message to learners if their verification is expiring soon.
+
+- 2016-10-11
+
+  - Role: edxapp
+    - Added COMPREHENSIVE_THEME_LOCALE_PATHS to support internationalization of strings originating from custom themes.
+
+- 2016-06-30
+  - Role: discovery
+    - Course Discovery JWT configuration now takes a list of issuers instead of a single issuer. This change is not backward compatible with older versions of course discovery.
+
+- 2016-06-22
+  - Role: hadoop_common
+    - Enable log retention by default to assist with debugging. YARN will now retain stdout and stderr logs produced by MapReduce tasks for 24 hours. They can be retrieved by running "yarn logs -applicationId YOUR_APPLICATION_ID".
+
+- 2016-06-08
+
+  - Role: Edxapp
+    - `EDXAPP_COMPREHENSIVE_THEME_DIR` is deprecated and is maintained for backward compatibility; `EDXAPP_COMPREHENSIVE_THEME_DIRS`, which is a list of directories, should be used instead.
+      If present, `EDXAPP_COMPREHENSIVE_THEME_DIR` will have priority over `EDXAPP_COMPREHENSIVE_THEME_DIRS`.
+    - `COMPREHENSIVE_THEME_DIR` is deprecated and is maintained for backward compatibility; `COMPREHENSIVE_THEME_DIRS`, which is a list of directories, should be used instead. If present, `COMPREHENSIVE_THEME_DIR` will have priority over `COMPREHENSIVE_THEME_DIRS`.
+
+- 2016-05-23
+
+  - Role: ecommerce
+    - Renamed `ECOMMERCE_COMPREHENSIVE_THEME_DIR` to `ECOMMERCE_COMPREHENSIVE_THEME_DIRS`; `ECOMMERCE_COMPREHENSIVE_THEME_DIRS` is now a list of directories. This change is backward incompatible.
+    - Renamed `COMPREHENSIVE_THEME_DIR` to `COMPREHENSIVE_THEME_DIRS`; `COMPREHENSIVE_THEME_DIRS` is now a list of directories. This change is backward incompatible.
+
+- 2016-01-25
+  - Role: common
+    - Renamed `COMMON_AWS_SYNC` to `COMMON_OBJECT_STORE_LOG_SYNC`
+    - Renamed `COMMON_AWS_SYNC_BUCKET` to `COMMON_OBJECT_STORE_LOG_SYNC_BUCKET`
+    - Renamed `COMMON_AWS_S3_SYNC_SCRIPT` to `COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT`
+    - Added `COMMON_OBJECT_STORE_LOG_SYNC_PREFIX`. Default: `logs/tracking/`
+  - Role: aws
+    - Removed `AWS_S3_LOGS`
+    - Added the `vhost` role as a dependency
+  - Role: edxapp
+    - Added `EDXAPP_SWIFT_USERNAME`
+    - Added `EDXAPP_SWIFT_KEY`
+    - Added `EDXAPP_SWIFT_TENANT_ID`
+    - Added `EDXAPP_SWIFT_TENANT_NAME`
+    - Added `EDXAPP_SWIFT_AUTH_URL`
+    - Added `EDXAPP_SWIFT_AUTH_VERSION`
+    - Added `EDXAPP_SWIFT_REGION_NAME`
+    - Added `EDXAPP_SWIFT_USE_TEMP_URLS`
+    - Added `EDXAPP_SWIFT_TEMP_URL_KEY`
+    - Added `EDXAPP_SWIFT_TEMP_URL_DURATION`
+    - Added `EDXAPP_SETTINGS` to allow using a settings file other than `aws.py`. Default: `aws`
+    - Renamed `ENABLE_S3_GRADE_DOWNLOADS` to `ENABLE_GRADE_DOWNLOADS`
+    - Replaced `EDXAPP_GRADE_STORAGE_TYPE`, `EDXAPP_GRADE_BUCKET` and `EDXAPP_GRADE_ROOT_PATH` with `EDXAPP_GRADE_STORAGE_CLASS` and `EDXAPP_GRADE_STORAGE_KWARGS`
+  - Role: openstack
+    - Added role
+  - Role: vhost
+    - Added as a dependency for the aws and openstack roles. Handles common functionality for setting up VM hosts
+  - Role: xqueue
+    - Added `XQUEUE_SETTINGS` to specify which settings file to use. Default: `aws_settings`
+    - Renamed `XQUEUE_S3_BUCKET` to `XQUEUE_UPLOAD_BUCKET`
+    - Renamed `XQUEUE_S3_PATH_PREFIX` to `XQUEUE_UPLOAD_PATH_PREFIX`
+
+- 2015-12-15
+  - Role: edxapp
+    - Removed the SUBDOMAIN_BRANDING and SUBDOMAIN_COURSE_LISTINGS variables
+
+- 2015-12-03
+  - Role: ora
+    - Removed the ora1 role, as support for it was deprecated in Cypress.
+    - Removed dependencies on ora throughout the playbooks / vagrantfiles.
+
+- 2015-11-12
+  - Role: edxapp
+    - Removed XmlModuleStore from the default list of modulestores for the LMS.
+    - The EDXAPP_XML_MAPPINGS variable no longer exists by default and is not used by the edxapp role.
+
+- 2015-11-03
+  - Role: ecommerce
+    - Removed the ECOMMERCE_ORDER_NUMBER_PREFIX variable
+
+- 2015-09-28
+  - Role: edxapp
+    - All of the following changes are BACKWARDS-INCOMPATIBLE:
+    - Renamed two top level variables SEGMENT_IO_LMS_KEY and SEGMENT_IO_KEY to SEGMENT_KEY in {lms|cms}.auth.json.
+    - Renamed two top level variables in roles/edxapp/defaults/main.yml: EDXAPP_SEGMENT_IO_LMS_KEY and EDXAPP_SEGMENT_IO_KEY are now EDXAPP_LMS_SEGMENT_KEY and EDXAPP_CMS_SEGMENT_KEY, respectively.
+    - REMOVED two top level variables SEGMENT_IO_LMS and SEGMENT_IO from {lms|cms}.auth.json. We will use the existence of the SEGMENT_KEY to serve the same function that these boolean variables served.
+    - REMOVED two top level variables EDXAPP_SEGMENT_IO_LMS and EDXAPP_SEGMENT_IO from roles/edxapp/defaults/main.yml. (See the sketch below.)
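+
+  To make the 2015-09-28 rename concrete, a before/after sketch of the edxapp
+  overrides (the key names are from the entry above; the key values are
+  placeholders):
+
+  ```
+  # Before (REMOVED): booleans plus separate LMS/CMS keys
+  # EDXAPP_SEGMENT_IO_LMS: true
+  # EDXAPP_SEGMENT_IO_LMS_KEY: 'old-lms-write-key'
+  # EDXAPP_SEGMENT_IO: true
+  # EDXAPP_SEGMENT_IO_KEY: 'old-cms-write-key'
+
+  # After: setting a key implies the integration is enabled
+  EDXAPP_LMS_SEGMENT_KEY: 'placeholder-lms-write-key'
+  EDXAPP_CMS_SEGMENT_KEY: 'placeholder-cms-write-key'
+  ```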
+
+- 2015-08-17
+  - Updated the ansible fork to be based on ansible 1.9.3rc1 instead of 1.9.1
+  - Ansible Changelog: https://github.com/ansible/ansible/blob/stable-1.9/CHANGELOG.md
+
+- 2015-06-17
+  - Role: rabbitmq
+    - Removed the RABBITMQ_CLUSTERED var and related tooling. The goal of the var was to make it possible to set up a cluster in the AWS environment without having to know all the cluster IPs beforehand. It relied on the `hostvars` Ansible variable to work correctly, which it no longer does in 1.9. This may get fixed in the future, but for now the "magic" setup doesn't work.
+    - Changed `rabbitmq_clustered_hosts` to RABBITMQ_CLUSTERED_HOSTS.
+
+- 2015-05-27
+  - Role: edxapp
+    - Removed the deprecated variables EDXAPP_PLATFORM_TWITTER_URL, EDXAPP_PLATFORM_MEETUP_URL, EDXAPP_PLATFORM_LINKEDIN_URL, and EDXAPP_PLATFORM_GOOGLE_PLUS_URL in favor of EDXAPP_SOCIAL_MEDIA_FOOTER_URLS. These variables haven't been used in edx-platform since March 17, 2015 (when https://github.com/openedx/edx-platform/pull/7383 was merged). This change is backwards incompatible with versions of edx-platform from before March 17, 2015.
+    - Added the EDXAPP_MOBILE_STORE_URLS and EDXAPP_FOOTER_ORGANIZATION_IMAGE variables, used in https://github.com/openedx/edx-platform/pull/8175 (the v3 version of the edx.org footer).
+
+- 2015-05-11
+  - Updated the ansible fork with a small bug fix.
+  - https://github.com/ansible/ansible/pull/10957
+
+- 2015-05-07
+  - Role: edxapp
+    - Removed post.txt from the list of files that will have their github urls replaced with git mirror urls.
+
+- 2015-04-29
+  - Role: edxapp
+    - The edxapp role no longer uses checksums to bypass pip installs.
+    - pip install will always run for all known requirements files.
+
+- 2015-04-12
+  - Role: edx-ansible
+    - `/edx/bin/update` no longer runs the ansible command with `--tags deploy`
+
+- 2015-03-23
+  - Role: edxapp
+    - Added newrelic monitoring capabilities to edxapp workers. Note that this is a BACKWARDS-INCOMPATIBLE CHANGE, as it introduces a new key, `monitor`, to each item in `EDXAPP_CELERY_WORKERS` in `defaults/main.yml`, and plays including this role will fail if that key is not set.
+
+- 2015-03-05
+  - Role: analytics_api, xqwatcher, insights, minos, edx_notes_api
+    - Expanded the `edx_service` role to do git checkout and ec2 tagging
+    - Refactored roles that depend on `edx_service` to use the new interface: `minos`, `analytics_api`, `insights`, and `xqwatcher`
+    - Refactored the name `analytics-api` to `analytics_api`
+    - Changed the location of minos' config file from `/edx/etc/minos/minos.yml` to `/edx/etc/minos.yml`
+    - Added the new `edx_notes_api` role for the forthcoming notes api
+    - This is a __BACKWARDS INCOMPATIBLE__ change and will require additional migrations when upgrading an existing server. While we recommend building from scratch, running the following commands _might_ work:
+
+      ```
+      rm -rf /edx/etc/minos
+      rm -rf /edx/app/analytics-api /edx/app/ /edx/app/nginx/sites-available/analytics-api.j2 /edx/app/supervisor/conf.d.available/analytics_api.conf
+      ```
+
+- 2015-02-06
+
+- 2015-02-02
+  - Role: edxapp
+    - Enabled the combined login registration feature by default
+
+- 2014-12-29
+  - Role: notifier
+    - Refactored `NOTIFIER_HOME` and `NOTIFIER_USER` to `notifier_app_dir` and `notifier_user` to match other roles.
+      This shouldn't change anything, since users should've only been overriding COMMON_HOME.
+
+- 2014-12-10
+  - Role: gitreload
+    - New role added for running [gitreload](https://github.com/mitodl/gitreload), which can be used for importing courses via github/gitlab webhooks, or more generally for updating any git repository that is already checked out on disk via a hook.
+
+- 2014-12-01
+  - Role: analytics-api, edxapp, ora, xqueue, xserver
+    - Switched gunicorn from an entirely command-argument-based configuration to using python configuration files. Variables for extra configuration in the configuration file template, and for command line argument overrides, are available.
+
+- 2014-11-13
+  - Role: analytics-api, insights
+    - Using the Django 1.7 migrate command.
+
+- 2014-10-15
+  - Role: edxapp
+    - A new var was added to make it easy to invalidate the default memcache store, in order to make it easier to invalidate sessions. Updating the edxapp env.json files will result in all users getting logged out. This is a one-time penalty as long as the value of `EDXAPP_DEFAULT_CACHE_VERSION` is not explicitly changed. (See the sketch at the end of this change log.)
+
+- 2014-09-18
+  - Role: nginx
+    - New html templates for server errors added, with defaults for a ratelimiting static page and a server error static page. CMS/LMS are set to use them by default; wording can be changed in the nginx default vars.
+
+- 2014-09-15
+  - Role: edxapp
+    - We now have an all-caps variable override for celery workers
+
+- 2014-08-28
+
+  - Role: Edxapp
+    - Added EDXAPP_LMS_AUTH_EXTRA and EDXAPP_CMS_AUTH_EXTRA for passing unique AUTH_EXTRA configurations to the LMS and CMS. Both variables default to EDXAPP_AUTH_EXTRA for backward compatibility.
+
+- 2014-08-22
+
+  - Role: Mongo
+    - Fixed the case of a variable used in an if block that breaks cluster configuration, by changing mongo_clustered to MONGO_CLUSTERED.
+
+- 2014-08-20
+  - Role: common
+    - We now remove the default syslog.d conf file (50-default.conf); this will break people who have hand-edited that file.
+
+- 2014-08-15
+  - Role: edxapp
+    - Updated the module store settings to match the new settings format.
+
+- 2014-08-05
+  - Update, possible breaking change: the edxapp role vars edxapp_lms_env and edxapp_cms_env have been changed to EDXAPP_LMS_ENV and EDXAPP_CMS_ENV to indicate, via our convention, that overriding them is expected. The default values remain the same.
+
+- 2014-06-26
+  - Role: analytics-api
+    - Added a new role for the analytics-api Django app. Currently a private repo.
+
+  - Logrotation now happens hourly by default for all logs.
+
+  - Basic auth will be turned on by default
+  - Updated the `CMS_HOSTNAME` default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
+
+- 2014-06-11
+  - Role: xqwatcher, xqueue, nginx, edxapp, common
+    - Moving the nginx basic authorization flag and credentials to the common role
+
+- 2014-06-02
+  - Role: Edxapp
+    - Turn on code sandboxing by default and allow the jailed code to write files to the tmp directory created for it by codejail.
+
+- 2014-05-28
+  - Role: Edxapp
+    - The repo.txt requirements file is no longer being processed in any way. This file was removed from edx-platform via pull #3487 (https://github.com/openedx/edx-platform/pull/3487)
+
+- 2014-05-19
+
+  - Start a change log to keep track of backwards-incompatible changes and deprecations.
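+
+  As flagged in the 2014-10-15 entry above, bumping the default memcache
+  store's version invalidates all existing sessions once; a minimal sketch
+  (the variable name is from the entry; the value and its format are
+  arbitrary assumptions):
+
+  ```
+  # Incrementing this versions every key in the default cache, logging all users out once.
+  EDXAPP_DEFAULT_CACHE_VERSION: '2'
+  ```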
diff --git a/Makefile b/Makefile new file mode 100755 index 00000000000..8142015e4e4 --- /dev/null +++ b/Makefile @@ -0,0 +1,56 @@ +SHELL := /bin/bash +.PHONY: help requirements clean build test pkg + +help: main.help + +main.help: + @echo '' + @echo 'Makefile for the edX Configuration' + @echo '' + @echo 'Usage:' + @echo ' make requirements install requirements' + @echo ' make upgrade upgrade dependencies in requirements files' + @echo ' make test run all tests' + @echo ' make build build everything' + @echo ' make pkg package everything' + @echo ' make clean remove build by-products' + @echo '' + +requirements: + pip install -qr requirements/pip.txt --exists-action w + pip install -qr requirements.txt --exists-action w + +COMMON_CONSTRAINTS_TXT=requirements/common_constraints.txt +.PHONY: $(COMMON_CONSTRAINTS_TXT) +$(COMMON_CONSTRAINTS_TXT): + wget -O "$(@)" https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt || touch "$(@)" + +upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade +upgrade: $(COMMON_CONSTRAINTS_TXT) + ## update the pip requirements files to use the latest releases satisfying our constraints + pip install -qr requirements/pip.txt + pip install -qr requirements/pip-tools.txt + # Make sure to compile files after any other files they include! + pip-compile --allow-unsafe --rebuild --upgrade -o requirements/pip.txt requirements/pip.in + pip-compile --upgrade -o requirements/pip-tools.txt requirements/pip-tools.in + pip install -qr requirements/pip.txt + pip install -qr requirements/pip-tools.txt + pip-compile --upgrade -o requirements.txt requirements/base.in + pip-compile --upgrade -o playbooks/roles/aws/templates/requirements.txt.j2 requirements/aws.in + pip-compile --upgrade -o util/elasticsearch/requirements.txt requirements/elasticsearch.in + pip-compile --upgrade -o util/jenkins/requirements-cloudflare.txt requirements/cloudflare.in + pip-compile --upgrade -o util/pingdom/requirements.txt requirements/pingdom.in + pip-compile --upgrade -o util/vpc-tools/requirements.txt requirements/vpc-tools.in + pip-compile --upgrade -o util/jenkins/requirements.txt requirements/jenkins.in + # Post process all of the files generated above to work around open pip-tools issues + util/post-pip-compile.sh \ + requirements/pip-tools.txt \ + requirements.txt \ + playbooks/roles/aws/templates/requirements.txt.j2 \ + util/elasticsearch/requirements.txt \ + util/jenkins/requirements-cloudflare.txt \ + util/pingdom/requirements.txt \ + util/vpc-tools/requirements.txt \ + util/jenkins/requirements.txt + +include *.mk diff --git a/README.md b/README.md deleted file mode 100644 index 1df79eb9ca6..00000000000 --- a/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Configuration Management - -## Introduction - -**This project is currently in alpha** - -The goal of the edx/configuration project is to provide a simple, but -flexible, way for anyone to stand up an instance of Open edX that is -fully configured and ready-to-go. - -Building the platform takes place in two phases: - -* Infrastructure provisioning -* Service configuration - -As much as possible, we have tried to keep a clean distinction between -provisioning and configuration. You are not obliged to use our tools -and are free to use one, but not the other. The provisioning phase -stands-up the required resources and tags them with role identifiers -so that the configuration tool can come in and complete the job. 
- -The reference platform is provisioned using an Amazon -[CloudFormation](http://aws.amazon.com/cloudformation/) template. -When the stack has been fully created you will have a new AWS Virtual -Private Cloud with hosts for the core edX services. This template -will build quite a number of AWS resources that cost money, so please -consider this before you start. - -The configuration phase is manged by [Ansible](http://ansible.cc/). -We have provided a number of playbooks that will configure each of -the edX service. - -This project is a re-write of the current edX provisioning and -configuration tools, we will be migrating features to this project -over time, so expect frequent changes. - - -For more information including installation instruction please see the [Configuration Wiki](https://github.com/edx/configuration/wiki). diff --git a/README.rst b/README.rst new file mode 100644 index 00000000000..e09c88e7574 --- /dev/null +++ b/README.rst @@ -0,0 +1,79 @@ +Configuration Management +######################## + +DEPRECATION WARNING +******************* + +This repository is Deprecated and is planned to be archived after the Redwood +Cut (Late April 2024). See https://github.com/openedx/public-engineering/issues/51 +for more details and up-to-date information. + +Description +*********** + +This repository is a collection of tools and scripts that edx.org uses to deploy +openedx. The purpose of this repository is to share portions of our toolchain +with the community. This repository is *not* the best way to get started running +openedx. For that, please look at `Open EdX Installation options`_, which contains +links to the recommended paths for new installations. + +**Important**: The Open edX configuration scripts need to be run as root on +your servers and will make changes to service configurations including, but not +limited to, sshd, dhclient, sudo, apparmor and syslogd. Our scripts are made +available as we use them and they implement our best practices. We strongly +recommend that you review everything that these scripts will do before running +them against your servers. We also recommend against running them against +servers that are hosting other applications. No warranty is expressed or +implied. + +For more information including installation instructions please see the `OpenEdX +Wiki`_. + +For info on any large recent changes please see the `change log`_. + +What is in this Repo? +********************* + +* `playbooks `__: This directory contains ansible playbooks that can + be used to configure individual services in the openedx platform. See + `Open EdX Installation options`_ before trying to use any of the scripts in + this directory. +* `docker `__: This directory contains dockerfiles that can be used to + test that playbooks execute cleanly. See `Makefiles `__ for + Documentation on how to run these containers. +* `requirements `__ : inputs for `pip-compile `__ + Update files in this directory and then run ``make upgrade`` to update + ``requirements.txt`` +* `tests `__: scripts used by travis-ci to test changes to this repo +* `util `__: one-off scripts or tools used to perform certain functions + related to openedx management. +* `vagrant `__: vagrant tooling for testing changes to this repo. + + +Roadmap +******* + +This repository is in ``sustained`` status. The goal is to deprecate this codebase +and move the deployment code into the repos with the application code. 
+ +With the adoption of containerized application platforms like `Kubernetes +`__, the tools in this repository are complex +and inappropriate for building small single purpose containers. + +At edx.org, we are focusing on deployment of applications using `Terraform +`__ and `Kubernetes `__. We +hope to provide open source tooling for this soon. + + +Contributing +************ + +* Bugfixes: If you would like to contribute a bugfix to this codebase, please open + a pull request. A bot will automatically walk your contribution through the + `Open Source Contribution process `__. + + +.. _Open EdX Installation options: https://open.edx.org/installation-options +.. _Ansible: http://ansible.com/ +.. _OpenEdX Wiki: https://openedx.atlassian.net/wiki/display/OpenOPS/Open+edX+Operations+Home +.. _change log: https://github.com/openedx/configuration/blob/master/CHANGELOG.md diff --git a/cloudformation_templates/edx-admin-reference-architecture.json b/cloudformation_templates/edx-admin-reference-architecture.json deleted file mode 100644 index e65432cbb09..00000000000 --- a/cloudformation_templates/edx-admin-reference-architecture.json +++ /dev/null @@ -1,892 +0,0 @@ -{ - "AWSTemplateFormatVersion":"2010-09-09", - "Description":"Bring up a VPC for operations.", - "Parameters":{ - "DeploymentTag":{ - "Type":"String", - "Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, , " - }, - "KeyName":{ - "Type":"String", - "Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server", - "Default":"deployment" - }, - "AdminInstanceType":{ - "Description":"WebServer EC2 instance type", - "Type":"String", - "Default":"m1.large", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "SSHLocation":{ - "Description":"The IP address range that can be used to SSH to the EC2 instances", - "Type":"String", - "MinLength":"9", - "MaxLength":"18", - "Default":"0.0.0.0/0", - "AllowedPattern":"(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription":"must be a valid IP CIDR range of the form x.x.x.x/x." - }, - "BastionInstanceType":{ - "Description":"Bastion Host EC2 instance type", - "Type":"String", - "Default":"t1.micro", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "NATInstanceType":{ - "Description":"NAT Device EC2 instance type", - "Type":"String", - "Default":"t1.micro", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." 
- }, - "JenkinsServerPort":{ - "Description":"The TCP port for the Jenkins server", - "Type":"Number", - "Default":"8080" - }, - "AsgardServerPort":{ - "Description":"The TCP port for the Asgard server", - "Type":"Number", - "Default":"8090" - }, - "MongoServicePort":{ - "Description":"The TCP port for the deployment mongo server", - "Type":"Number", - "Default":"10001" - } - }, - "Mappings":{ - "AWSInstanceType2Arch":{ - "t1.micro": { "Arch":"64" }, - "m1.small": { "Arch":"64" }, - "m1.medium": { "Arch":"64" }, - "m1.large": { "Arch":"64" }, - "m1.xlarge": { "Arch":"64" }, - "m2.xlarge": { "Arch":"64" }, - "m2.2xlarge": { "Arch":"64" }, - "m2.4xlarge": { "Arch":"64" }, - "m3.xlarge": { "Arch":"64" }, - "m3.2xlarge": { "Arch":"64" }, - "c1.medium": { "Arch":"64" }, - "c1.xlarge": { "Arch":"64" }, - "cg1.4xlarge": { "Arch":"64HVM" } - }, - "AWSRegionArch2AMI":{ - "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" }, - "us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" }, - "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" }, - "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" }, - "sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" }, - "ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" }, - "ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" }, - "ap-northeast-1": { "32":"ami-fc6ceefd", "64":"ami-fe6ceeff" } - }, - "AWSNATAMI":{ - "us-east-1": { "AMI":"ami-c6699baf" }, - "us-west-2": { "AMI":"ami-52ff7262" }, - "us-west-1": { "AMI":"ami-3bcc9e7e" }, - "eu-west-1": { "AMI":"ami-0b5b6c7f" }, - "ap-southeast-1": { "AMI":"ami-02eb9350" }, - "ap-southeast-2": { "AMI":"ami-ab990e91" }, - "ap-northeast-1": { "AMI":"ami-14d86d15" }, - "sa-east-1": { "AMI":"ami-0439e619" } - }, - "SubnetConfig":{ - "VPC": { "CIDR":"10.0.0.0/16" }, - "Public01": { "CIDR":"10.0.0.0/24" }, - "Admin": { "CIDR":"10.0.185.0/24" } - }, - "MapRegionsToAvailZones":{ - "us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" }, - "us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" }, - "us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" }, - "eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" }, - "sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" }, - "ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" }, - "ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" }, - "ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" } - } - }, - "Resources":{ - "AdminVPC":{ - "Type":"AWS::EC2::VPC", - "Properties":{ - "EnableDnsSupport" : "true", - "EnableDnsHostnames" : "true", - "CidrBlock":"10.0.0.0/16", - "InstanceTenancy":"default" - } - }, - "PublicSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "CidrBlock":{ - "Fn::FindInMap":[ - "SubnetConfig", - "Public01", - "CIDR" - ] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"immutable_metadata", - "Value":"{'purpose':'external','target':'ec2'}" - } - ] - } - }, - "AdminSubnet":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "CidrBlock":{ - "Fn::FindInMap":[ - "SubnetConfig", - "Admin", - "CIDR" - 
] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"admin" - }, - { - "Key":"Network", - "Value":"Private" - } - ] - } - }, - "InternetGateway":{ - "Type":"AWS::EC2::InternetGateway", - "Properties":{ - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Public" - } - ] - } - }, - "GatewayToInternet":{ - "Type":"AWS::EC2::VPCGatewayAttachment", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "InternetGatewayId":{ - "Ref":"InternetGateway" - } - } - }, - "PublicRouteTable":{ - "Type":"AWS::EC2::RouteTable", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Public" - } - ] - } - }, - "PublicRoute":{ - "Type":"AWS::EC2::Route", - "Properties":{ - "RouteTableId":{ - "Ref":"PublicRouteTable" - }, - "DestinationCidrBlock":"0.0.0.0/0", - "GatewayId":{ - "Ref":"InternetGateway" - } - } - }, - "PublicSubnetRouteTableAssociation01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "RouteTableId":{ - "Ref":"PublicRouteTable" - } - } - }, - "PublicNetworkAcl":{ - "Type":"AWS::EC2::NetworkAcl", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Public" - } - ] - } - }, - "InboundHTTPPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"80", - "To":"80" - } - } - }, - "InboundHTTPSPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"101", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"443", - "To":"443" - } - } - }, - "InboundSSHPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"102", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":{ - "Ref":"SSHLocation" - }, - "PortRange":{ - "From":"22", - "To":"22" - } - } - }, - "InboundEmphemeralPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"103", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"1024", - "To":"65535" - } - } - }, - "OutboundPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"true", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"0", - "To":"65535" - } - } - }, - "PublicSubnetNetworkAclAssociation01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - } - } - }, - "PrivateRouteTable":{ - "Type":"AWS::EC2::RouteTable", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } 
- }, - { - "Key":"Network", - "Value":"Private" - } - ] - } - }, - "PrivateRoute":{ - "Type":"AWS::EC2::Route", - "Properties":{ - "RouteTableId":{ - "Ref":"PrivateRouteTable" - }, - "DestinationCidrBlock":"0.0.0.0/0", - "InstanceId":{ - "Ref":"NATDevice" - } - } - }, - "PrivateSubnetRouteTableAssociationAdmin":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"AdminSubnet" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateNetworkAcl":{ - "Type":"AWS::EC2::NetworkAcl", - "Properties":{ - "VpcId":{ - "Ref":"AdminVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Private" - } - ] - } - }, - "InboundPrivateNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"0", - "To":"65535" - } - } - }, - "OutBoundPrivateNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"true", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"0", - "To":"65535" - } - } - }, - "PrivateSubnetNetworkAclAssociationAdmin":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"AdminSubnet" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "NATIPAddress":{ - "Type":"AWS::EC2::EIP", - "Properties":{ - "Domain":"vpc", - "InstanceId":{ - "Ref":"NATDevice" - } - } - }, - "NATDevice":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"NATInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "SourceDestCheck":"false", - "ImageId":{ - "Fn::FindInMap":[ - "AWSNATAMI", - { - "Ref":"AWS::Region" - }, - "AMI" - ] - }, - "SecurityGroupIds":[ - { - "Ref":"NATSecurityGroup" - } - ] - } - }, - "NATSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable internal access to the NAT device", - "VpcId":{ - "Ref":"AdminVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":{ "Ref": "MongoServicePort" }, - "ToPort":{ "Ref": "MongoServicePort" }, - "CidrIp":"0.0.0.0/0" - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":{ "Ref": "MongoServicePort" }, - "ToPort":{ "Ref": "MongoServicePort" }, - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "BastionIPAddress":{ - "Type":"AWS::EC2::EIP", - "Properties":{ - "Domain":"vpc", - "InstanceId":{ - "Ref":"BastionHost" - } - } - }, - "BastionHost":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"BastionInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "ImageId":{ - 
"Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"BastionInstanceType" - }, - "Arch" - ] - } - ] - }, - "SecurityGroupIds":[ - { - "Ref":"BastionSecurityGroup" - } - ], - "Tags":[ - { - "Key":"play", - "Value":"bastion" - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ] - } - }, - "BastionSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable access to the Bastion host", - "VpcId":{ - "Ref":"AdminVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":"10.0.0.0/16" - }, - { - "IpProtocol":"tcp", - "FromPort":{ "Ref": "JenkinsServerPort" }, - "ToPort":{ "Ref": "JenkinsServerPort" }, - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":{ "Ref": "AsgardServerPort" }, - "ToPort":{ "Ref": "AsgardServerPort" }, - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "AdminRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Principal": { - "Service": [ "ec2.amazonaws.com" ] - }, - "Action": [ "sts:AssumeRole" ] - } ] - }, - "Path": "/", - "Policies": [ { - "PolicyName": "AdminBasePolicy", - "PolicyDocument": { - "Statement":[ - { - "Effect":"Allow", - "Action": "*", - "Resource":"*" - } - ] - } - } ] - } - }, - "AdminInstanceProfile": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "Path": "/", - "Roles": [ { - "Ref": "AdminRole" - } ] - } - }, - "AdminHost":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"AdminInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "IamInstanceProfile" : { - "Ref" : "AdminInstanceProfile" - }, - "SubnetId":{ - "Ref":"AdminSubnet" - }, - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"AdminInstanceType" - }, - "Arch" - ] - } - ] - }, - "SecurityGroupIds":[ - { - "Ref":"AdminSecurityGroup" - } - ], - "Tags":[ - { - "Key":"play", - "Value":"admin" - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "UserData":{ - "Fn::Base64":{ - "Fn::Join":[ - "", - [ - "#!/bin/bash -x\n", - "exec >> /home/ubuntu/cflog.log\n", - "exec 2>> /home/ubuntu/cflog.log\n", - "function error_exit\n", - "{\n", - " cfn-signal -e 1 -r \"$1\" '", - { - "Ref":"AdminServerWaitHandle" - }, - "'\n", - " exit 1\n", - "}\n", - "apt-get -y update\n", - "apt-get -y install python-setuptools\n", - "echo \"Python Tools installed\" - `date`\n", - "easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n", - "echo \"Cloudformation Boostrap installed \" - `date`\n", - "# If all went well, signal success\n", - "cfn-signal -e $? 
-r 'Edx Server configuration' '", - { - "Ref":"AdminServerWaitHandle" - }, - "'\n" - ] - ] - } - } - } - }, - "AdminSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Admin Security Group", - "VpcId":{ - "Ref":"AdminVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":{ "Ref": "JenkinsServerPort" }, - "ToPort":{ "Ref": "JenkinsServerPort" }, - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":{ "Ref": "AsgardServerPort" }, - "ToPort":{ "Ref": "AsgardServerPort" }, - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "AdminServerWaitHandle":{ - "Type":"AWS::CloudFormation::WaitConditionHandle" - }, - "AdminServerWaitCondition":{ - "Type":"AWS::CloudFormation::WaitCondition", - "DependsOn":"AdminHost", - "Properties":{ - "Handle":{ - "Ref":"AdminServerWaitHandle" - }, - "Timeout":"1200" - } - } - } -} diff --git a/cloudformation_templates/edx-reference-architecture.json b/cloudformation_templates/edx-reference-architecture.json deleted file mode 100644 index f40c0a1ecc4..00000000000 --- a/cloudformation_templates/edx-reference-architecture.json +++ /dev/null @@ -1,5705 +0,0 @@ -{ - "AWSTemplateFormatVersion":"2010-09-09", - "Description":"Bring up the complete EdX stack in a VPC.", - "Parameters":{ - "EnvironmentTag":{ - "Type":"String", - "Description":"A tag value applied to the hosts in the VPC indicating which environment to use during the configuration phase, e.g., stage, prod, sandbox", - "Default":"sandbox" - }, - "DeploymentTag":{ - "Type":"String", - "Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, , " - }, - "KeyName":{ - "Type":"String", - "Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server", - "Default":"deployment" - }, - "EdxappInstanceType":{ - "Description":"WebServer EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "WorkerInstanceType":{ - "Description":"Worker EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "ForumInstanceType":{ - "Description":"Forum EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." 
- }, - "NotifierInstanceType":{ - "Description":"Notifier EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "MongoInstanceType":{ - "Description":"Worker EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "CommonClusterInstanceType":{ - "Description":"The instance type on which common, clustered applications, e.g., RabbitMQ and Elasticsearch are hosted.", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "XserverInstanceType":{ - "Description":"Xserver server EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "XqueueInstanceType":{ - "Description":"Xqueue server EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "SSHLocation":{ - "Description":"The IP address range that can be used to SSH to the EC2 instances", - "Type":"String", - "MinLength":"9", - "MaxLength":"18", - "Default":"0.0.0.0/0", - "AllowedPattern":"(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription":"must be a valid IP CIDR range of the form x.x.x.x/x." 
- }, - "EdxappServerPort":{ - "Description":"The TCP port for the Edxapp Server", - "Type":"Number", - "Default":"18000" - }, - "XqueueServerPort":{ - "Description":"The TCP port for the Xqueue server", - "Type":"Number", - "Default":"18040" - }, - "XserverServerPort":{ - "Description":"The TCP port for the XServer", - "Type":"Number", - "Default":"18050" - }, - "ForumServerPort":{ - "Description":"The TCP port for the Forum Server", - "Type":"Number", - "Default":"18080" - }, - "CacheNodePort":{ - "Description":"The TCP port for the nodes in the Elasticache cluster", - "Type":"Number", - "Default":"11211" - }, - "SSLCertificateARN":{ - "Description":"The ARN for an SSL certificate to use with the edxapp.", - "Type":"String", - "Default":"arn:aws:iam::372153017832:server-certificate/dummy" - }, - "BastionInstanceType":{ - "Description":"Bastion Host EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "NATInstanceType":{ - "Description":"NET Device EC2 instance type", - "Type":"String", - "Default":"m1.small", - "AllowedValues":[ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ], - "ConstraintDescription":"must be a valid EC2 instance type." - }, - "EdxappDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the edxapp hosts", - "Type":"Number", - "Default":"2" - }, - "XqueueDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the xqueue hosts", - "Type":"Number", - "Default":"2" - }, - "XServerDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the xserver hosts", - "Type":"Number", - "Default":"2" - }, - "CommonClusterDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the CommonCluster hosts", - "Type":"Number", - "Default":"3" - }, - "WorkerDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the celery worker hosts", - "Type":"Number", - "Default":"2" - }, - "ForumDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the forums hosts", - "Type":"Number", - "Default":"2" - }, - "NotifierDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the notifier hosts", - "Type":"Number", - "Default":"1" - }, - "MongoDesiredCapacity":{ - "Description":"The Auto-scaling group desired capacity for the mongodb hosts", - "Type":"Number", - "Default":"3" - }, - "CacheNodeType":{ - "Default":"cache.m1.small", - "Description":"The compute and memory capacity of the nodes in the Cache Cluster", - "Type":"String", - "AllowedValues":[ - "cache.t1.micro", - "cache.m1.small", - "cache.m1.large", - "cache.m1.xlarge", - "cache.m2.xlarge", - "cache.m2.2xlarge", - "cache.m2.4xlarge", - "cache.c1.xlarge" - ], - "ConstraintDescription":"must select a valid Cache Node type." - }, - "NumberOfCacheNodes":{ - "Default":"2", - "Description":"The number of Cache Nodes the Cache Cluster should have", - "Type":"Number", - "MinValue":"1", - "MaxValue":"10", - "ConstraintDescription":"must be between 1 and 10." 
- }, - "DBName":{ - "Default":"edxapp", - "Description":"The database name", - "Type":"String", - "MinLength":"1", - "MaxLength":"64", - "AllowedPattern":"[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription":"must begin with a letter and contain only alphanumeric characters." - }, - "DBUsername":{ - "Default":"root", - "NoEcho":"true", - "Description":"The database admin account username", - "Type":"String", - "MinLength":"1", - "MaxLength":"16", - "AllowedPattern":"[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription":"must begin with a letter and contain only alphanumeric characters." - }, - "DBPassword":{ - "Default":"changeme", - "NoEcho":"true", - "Description":"The database admin account password", - "Type":"String", - "MinLength":"8", - "MaxLength":"41", - "ConstraintDescription":"must contain only alphanumeric characters." - }, - "DBClass":{ - "Default":"db.m1.small", - "Description":"Database instance class", - "Type":"String", - "AllowedValues":[ - "db.m1.micro", - "db.m1.small", - "db.m1.large", - "db.m1.xlarge", - "db.m2.xlarge", - "db.m2.2xlarge", - "db.m2.4xlarge" - ], - "ConstraintDescription":"must select a valid database instance type." - }, - "DBEngineVersion":{ - "Default":"5.6", - "Description":"Version of MySQL for the RDS", - "Type":"String", - "AllowedValues":[ - "5.5", - "5.6" - ], - "ConstraintDescription":"must select a valid database version." - }, - "DBAllocatedStorage":{ - "Default":"5", - "Description":"The size of the database (Gb)", - "Type":"Number", - "MinValue":"5", - "MaxValue":"3072", - "ConstraintDescription":"must be between 5 and 3072Gb." - }, - "MongoVolumeSize":{ - "Default":"5", - "Description":"The size of the mongodb volumes(Gb). Because of RAID double the volume size will be available for mongo to use.", - "Type":"Number", - "MinValue":"5", - "MaxValue":"3072", - "ConstraintDescription":"must be between 5 and 3072Gb." - }, - "ClassB":{ - "Default":"1", - "Description":"The second octet of the Class B to be allocated for this VPC. 10.?.xxx.xxx", - "Type":"Number", - "MinValue":"0", - "MaxValue":"255", - "ConstraintDescription":"ClassB value must be between 0 and 255." 
- } - }, - "Mappings":{ - "AWSInstanceType2Arch":{ - "t1.micro": { "Arch":"64" }, - "m1.small": { "Arch":"64" }, - "m1.medium": { "Arch":"64" }, - "m1.large": { "Arch":"64" }, - "m1.xlarge": { "Arch":"64" }, - "m2.xlarge": { "Arch":"64" }, - "m2.2xlarge": { "Arch":"64" }, - "m2.4xlarge": { "Arch":"64" }, - "m3.xlarge": { "Arch":"64" }, - "m3.2xlarge": { "Arch":"64" }, - "c1.medium": { "Arch":"64" }, - "c1.xlarge": { "Arch":"64" }, - "cg1.4xlarge": { "Arch":"64HVM" } - }, - "AWSRegionArch2AMI":{ - "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" }, - "us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" }, - "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" }, - "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" }, - "sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" }, - "ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" }, - "ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" }, - "ap-northeast-1": { "32":"ami-fc6ceefd", "64":"ami-fe6ceeff" } - }, - "AWSNATAMI":{ - "us-east-1": { "AMI":"ami-c6699baf" }, - "us-west-2": { "AMI":"ami-52ff7262" }, - "us-west-1": { "AMI":"ami-3bcc9e7e" }, - "eu-west-1": { "AMI":"ami-0b5b6c7f" }, - "ap-southeast-1": { "AMI":"ami-02eb9350" }, - "ap-southeast-2": { "AMI":"ami-ab990e91" }, - "ap-northeast-1": { "AMI":"ami-14d86d15" }, - "sa-east-1": { "AMI":"ami-0439e619" } - }, - "SubnetConfig":{ - "VPC": { "CIDR":".0.0/16" }, - "Public01": { "CIDR":".0.0/24" }, - "Public02": { "CIDR":".1.0/24" }, - "Edxapp01": { "CIDR":".10.0/24" }, - "Edxapp02": { "CIDR":".11.0/24" }, - "XServerJail01": { "CIDR":".20.0/24" }, - "XServerJail02": { "CIDR":".21.0/24" }, - "Xqueue01": { "CIDR":".30.0/24" }, - "Xqueue02": { "CIDR":".31.0/24" }, - "CommonCluster01": { "CIDR":".46.0/24"}, - "CommonCluster02": { "CIDR":".47.0/24"}, - "CommonCluster03": { "CIDR":".48.0/24"}, - "Data01": { "CIDR":".50.0/24" }, - "Data02": { "CIDR":".51.0/24" }, - "Cache01": { "CIDR":".60.0/24" }, - "Cache02": { "CIDR":".61.0/24" }, - "Worker01": { "CIDR":".70.0/24" }, - "Worker02": { "CIDR":".71.0/24" }, - "Forum01": { "CIDR":".80.0/24" }, - "Forum02": { "CIDR":".81.0/24" }, - "Mongo01": { "CIDR":".90.0/24" }, - "Mongo02": { "CIDR":".91.0/24" }, - "Mongo03": { "CIDR":".92.0/24" }, - "Notifier01": { "CIDR":".100.0/24" }, - "Admin": { "CIDR":".200.0/24" } - }, - "MapRegionsToAvailZones":{ - "us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" }, - "us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" }, - "us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" }, - "eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" }, - "sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" }, - "ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" }, - "ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" }, - "ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" } - } - }, - "Resources":{ - "EdxVPC":{ - "Type":"AWS::EC2::VPC", - "Properties":{ - "EnableDnsSupport" : "true", - "EnableDnsHostnames" : "true", - "CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}, - "InstanceTenancy":"default" - } - }, - "PublicSubnet01":{ - "Type":"AWS::EC2::Subnet", - 
"Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Public01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "external','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "PublicSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Public02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "external','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "AdminSubnet":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Admin", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"admin" - }, - { - "Key":"Network", - "Value":"Private" - } - ] - } - }, - "EdxappSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Edxapp01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"edxapp" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key":"immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-edxapp','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "EdxappSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Edxapp02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"edxapp" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key":"immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-edxapp','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "XqueueSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Xqueue01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"xqueue" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - 
{"Ref":"DeploymentTag"}, - "-", - "internal-xqueue','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "XqueueSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Xqueue02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"xqueue" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-xqueue','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "CommonClusterSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "CommonCluster01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"commoncluster" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-commoncluster','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "CommonClusterSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "CommonCluster02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"commoncluster" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-commoncluster','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "CommonClusterSubnet03":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "CommonCluster03", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone2" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"commoncluster" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-commoncluster','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "XServerSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "XServerJail01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"xserver" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - 
"internal-xserver','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "XServerSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "XServerJail02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"xserver" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-xserver','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "Data01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Data01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"RDS" - }, - { - "Key":"Network", - "Value":"Data" - } - ] - } - }, - "Data02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Data02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"RDS" - }, - { - "Key":"Network", - "Value":"Data" - } - ] - } - }, - "Cache01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Cache01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"Elasticache" - }, - { - "Key":"Network", - "Value":"Cache" - } - ] - } - }, - "Cache02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Cache02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"Elasticache" - }, - { - "Key":"Network", - "Value":"Cache" - } - ] - } - }, - "WorkerSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Worker01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"worker" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-worker','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "WorkerSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Worker02", - 
"CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"worker" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-worker','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "ForumSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Forum01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"forum" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-forum','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "ForumSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Forum02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"forum" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-forum','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "MongoSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"mongo" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-mongo','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "MongoSubnet02":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo02", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone1" - ] - }, - "Tags":[ - { - "Key":"Application", - "Value":"mongo" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-mongo','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "MongoSubnet03":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo03", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone2" - ] - }, - 
"Tags":[ - { - "Key":"Application", - "Value":"mongo" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-mongo','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "NotifierSubnet01":{ - "Type":"AWS::EC2::Subnet", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "CidrBlock":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Notifier01", - "CIDR" - ]} - ]] - }, - "AvailabilityZone":{ - "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "Tags":[ - { - "Key":"play", - "Value":"notifier" - }, - { - "Key":"Network", - "Value":"Private" - }, - { - "Key" : "immutable_metadata", - "Value":{"Fn::Join":["", - ["{'purpose':'", - {"Ref":"EnvironmentTag"}, - "-", - {"Ref":"DeploymentTag"}, - "-", - "internal-notifier','target':'ec2'}" - ] - ] - } - } - ] - } - }, - "InternetGateway":{ - "Type":"AWS::EC2::InternetGateway", - "Properties":{ - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Public" - } - ] - } - }, - "GatewayToInternet":{ - "Type":"AWS::EC2::VPCGatewayAttachment", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "InternetGatewayId":{ - "Ref":"InternetGateway" - } - } - }, - "PublicRouteTable":{ - "Type":"AWS::EC2::RouteTable", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Public" - } - ] - } - }, - "PublicRoute":{ - "Type":"AWS::EC2::Route", - "Properties":{ - "RouteTableId":{ - "Ref":"PublicRouteTable" - }, - "DestinationCidrBlock":"0.0.0.0/0", - "GatewayId":{ - "Ref":"InternetGateway" - } - } - }, - "PublicSubnetRouteTableAssociation01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "RouteTableId":{ - "Ref":"PublicRouteTable" - } - } - }, - "PublicSubnetRouteTableAssociation02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"PublicSubnet02" - }, - "RouteTableId":{ - "Ref":"PublicRouteTable" - } - } - }, - "PublicNetworkAcl":{ - "Type":"AWS::EC2::NetworkAcl", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Public" - } - ] - } - }, - "InboundHTTPPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"80", - "To":"80" - } - } - }, - "InboundHTTPSPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"101", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"443", - "To":"443" - } - } - }, - "InboundSSHPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"102", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":{ - "Ref":"SSHLocation" - }, - "PortRange":{ - "From":"22", - "To":"22" - } - } - }, - "InboundEmphemeralPublicNetworkAclEntry":{ - 
"Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"103", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"1024", - "To":"65535" - } - } - }, - "InboundPingRequestPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"104", - "Protocol":"1", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}, - "Icmp": { - "Code": "0", - "Type": "0" - } - } - }, - "InboundPingReplyPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"105", - "Protocol":"1", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}, - "Icmp": { - "Code": "0", - "Type": "8" - } - } - }, - "OutboundPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"true", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"0", - "To":"65535" - } - } - }, - "OutboundPingRequestPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"101", - "Protocol":"1", - "RuleAction":"allow", - "Egress":"true", - "CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}, - "Icmp": { - "Code": "0", - "Type": "0" - } - } - }, - "OutboundPingReplyPublicNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - }, - "RuleNumber":"102", - "Protocol":"1", - "RuleAction":"allow", - "Egress":"true", - "CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}, - "Icmp": { - "Code": "0", - "Type": "8" - } - } - }, - "PublicSubnetNetworkAclAssociation01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - } - } - }, - "PublicSubnetNetworkAclAssociation02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"PublicSubnet02" - }, - "NetworkAclId":{ - "Ref":"PublicNetworkAcl" - } - } - }, - "PrivateRouteTable":{ - "Type":"AWS::EC2::RouteTable", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Private" - } - ] - } - }, - "PrivateRoute":{ - "Type":"AWS::EC2::Route", - "Properties":{ - "RouteTableId":{ - "Ref":"PrivateRouteTable" - }, - "DestinationCidrBlock":"0.0.0.0/0", - "InstanceId":{ - "Ref":"NATDevice" - } - } - }, - "PrivateSubnetRouteTableAssociationAdmin":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"AdminSubnet" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationEdxapp01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"EdxappSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - 
"PrivateSubnetRouteTableAssociationEdxapp02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"EdxappSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationXqueue01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XqueueSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationXqueue02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XqueueSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationCommonCluster01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"CommonClusterSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationCommonCluster02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"CommonClusterSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationCommonCluster03":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"CommonClusterSubnet03" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationXServer01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XServerSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationXServer02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XServerSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationData01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Data01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationData02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Data02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationCache01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Cache01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationCache02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Cache02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationWorker01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"WorkerSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationWorker02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"WorkerSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationForum01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"ForumSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationForum02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"ForumSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - 
"PrivateSubnetRouteTableAssociationNotifier01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"NotifierSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationMongo01":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"MongoSubnet01" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationMongo02":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"MongoSubnet02" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateSubnetRouteTableAssociationMongo03":{ - "Type":"AWS::EC2::SubnetRouteTableAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"MongoSubnet03" - }, - "RouteTableId":{ - "Ref":"PrivateRouteTable" - } - } - }, - "PrivateNetworkAcl":{ - "Type":"AWS::EC2::NetworkAcl", - "Properties":{ - "VpcId":{ - "Ref":"EdxVPC" - }, - "Tags":[ - { - "Key":"Application", - "Value":{ - "Ref":"AWS::StackId" - } - }, - { - "Key":"Network", - "Value":"Private" - } - ] - } - }, - "InboundPrivateNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"false", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"0", - "To":"65535" - } - } - }, - "OutBoundPrivateNetworkAclEntry":{ - "Type":"AWS::EC2::NetworkAclEntry", - "Properties":{ - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - }, - "RuleNumber":"100", - "Protocol":"6", - "RuleAction":"allow", - "Egress":"true", - "CidrBlock":"0.0.0.0/0", - "PortRange":{ - "From":"0", - "To":"65535" - } - } - }, - "PrivateSubnetNetworkAclAssociationAdmin":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"AdminSubnet" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationEdxapp01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"EdxappSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationEdxapp02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"EdxappSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationXqueue01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XqueueSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationXqueue02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XqueueSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationCommonCluster01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"CommonClusterSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationCommonCluster02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"CommonClusterSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationCommonCluster03":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"CommonClusterSubnet03" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - 
}, - "PrivateSubnetNetworkAclAssociationXServer01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XServerSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationXServer02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"XServerSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationData01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Data01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationData02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Data02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationCache01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Cache01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationCache02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"Cache02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationWorker01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"WorkerSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationWorker02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"WorkerSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationForum01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"ForumSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationForum02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"ForumSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationNotifier01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"NotifierSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationMongo01":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"MongoSubnet01" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationMongo02":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"MongoSubnet02" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "PrivateSubnetNetworkAclAssociationMongo03":{ - "Type":"AWS::EC2::SubnetNetworkAclAssociation", - "Properties":{ - "SubnetId":{ - "Ref":"MongoSubnet03" - }, - "NetworkAclId":{ - "Ref":"PrivateNetworkAcl" - } - } - }, - "NATIPAddress":{ - "Type":"AWS::EC2::EIP", - "Properties":{ - "Domain":"vpc", - "InstanceId":{ - "Ref":"NATDevice" - } - } - }, - "NATDevice":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"NATInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "SourceDestCheck":"false", - "ImageId":{ - "Fn::FindInMap":[ - "AWSNATAMI", - { - "Ref":"AWS::Region" - }, - "AMI" - ] - }, - "SecurityGroupIds":[ - { - 
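One pattern above worth calling out: network ACLs are stateless, which is why the public ACL needs the inbound ephemeral-port rule (1024-65535) alongside the HTTP/HTTPS/SSH rules; replies to outbound connections would otherwise be dropped. The private ACL instead allows all TCP in both directions and leaves filtering to the security groups. A rough boto3 equivalent of one inbound entry (the ACL ID is hypothetical; AWS credentials are assumed to be configured):

```python
import boto3

ec2 = boto3.client("ec2")  # region/credentials assumed to be configured

def allow_inbound_tcp(acl_id: str, rule_number: int, from_port: int,
                      to_port: int, cidr: str) -> None:
    """Rough equivalent of one AWS::EC2::NetworkAclEntry with Egress=false."""
    ec2.create_network_acl_entry(
        NetworkAclId=acl_id,
        RuleNumber=rule_number,   # lowest matching rule number wins
        Protocol="6",             # TCP
        RuleAction="allow",
        Egress=False,
        CidrBlock=cidr,
        PortRange={"From": from_port, "To": to_port},
    )

# Mirrors rules 100 (HTTP) and 103 (ephemeral return traffic) above:
# allow_inbound_tcp("acl-0123456789abcdef0", 100, 80, 80, "0.0.0.0/0")
# allow_inbound_tcp("acl-0123456789abcdef0", 103, 1024, 65535, "0.0.0.0/0")
```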
"Ref":"NATSecurityGroup" - } - ] - } - }, - "BackupNATIPAddress":{ - "Type":"AWS::EC2::EIP", - "Properties":{ - "Domain":"vpc", - "InstanceId":{ - "Ref":"BackupNATDevice" - } - } - }, - "BackupNATDevice":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"NATInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "SubnetId":{ - "Ref":"PublicSubnet02" - }, - "SourceDestCheck":"false", - "ImageId":{ - "Fn::FindInMap":[ - "AWSNATAMI", - { - "Ref":"AWS::Region" - }, - "AMI" - ] - }, - "SecurityGroupIds":[ - { - "Ref":"NATSecurityGroup" - } - ] - } - }, - "NATSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable internal access to the NAT device", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"9418", - "ToPort":"9418", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"9997", - "ToPort":"9997", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"10016", - "ToPort":"10016", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"11371", - "ToPort":"11371", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"icmp", - "FromPort":"-1", - "ToPort":"-1", - "CidrIp":"0.0.0.0/0" - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"9997", - "ToPort":"9997", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"9418", - "ToPort":"9418", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"10016", - "ToPort":"10016", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"11371", - "ToPort":"11371", - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "NATMonitorRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Principal": { - "Service": [ "ec2.amazonaws.com" ] - }, - "Action": [ "sts:AssumeRole" ] - } ] - }, - "Path": "/", - "Policies": [ { - "PolicyName": "NAT_Takeover", - "PolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRouteTables", - "ec2:CreateRoute", - "ec2:ReplaceRoute", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "*" - } ] - } - } ] - } - }, - "NATMonitorRoleProfile": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "Path": "/", - "Roles": [ { - "Ref": "NATMonitorRole" - } ] - } - }, - "BastionIPAddress":{ - "Type":"AWS::EC2::EIP", - "Properties":{ - "Domain":"vpc", - "InstanceId":{ - "Ref":"BastionHost" - } - } - }, - "BastionHost":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"BastionInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "IamInstanceProfile" : { - "Ref" : "NATMonitorRoleProfile" - }, - "SubnetId":{ - "Ref":"PublicSubnet01" - }, - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - 
"Ref":"BastionInstanceType" - }, - "Arch" - ] - } - ] - }, - "SecurityGroupIds":[ - { - "Ref":"BastionSecurityGroup" - } - ], - "Tags":[ - { - "Key":"play", - "Value":"bastion" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "UserData": { "Fn::Base64" : { "Fn::Join" : ["", [ - "#!/bin/bash -v\n", - "mkdir -p /opt/edx/bin\n", - "cd /opt\n", - "apt-get update\n", - "apt-get install openjdk-6-jre-headless unzip -y\n", - "wget http://s3.amazonaws.com/ec2-downloads/ec2-api-tools.zip\n", - "unzip ec2-api-tools.zip\n", - "rm ec2-api-tools.zip\n", - "ln -sf ec2-api-tools-* ec2-api-tools\n", - - "cat <<'EOF' > /opt/edx/bin/nat_monitor.sh\n", - "#!/bin/bash\n", - "# This script will monitor another NAT instance and take over its routes\n", - "# if communication with the other instance fails\n", - "\n", - "# NAT instance variables\n", - "# Other instance's IP to ping and route to grab if other node goes down\n", - "PRIMARY_NAT_ID=", { "Ref":"NATDevice" }, "\n", - "BACKUP_NAT_ID=", { "Ref": "BackupNATDevice" }, "\n", - "NAT_RT_ID=", { "Ref": "PrivateRouteTable" }, "\n", - "\n", - "# Specify the EC2 region that this will be running in (e.g. https://ec2.us-east-1.amazonaws.com)\n", - "EC2_URL=https://ec2.",{ "Ref": "AWS::Region" },".amazonaws.com\n", - "\n", - "# Health Check variables\n", - "Num_Pings=3\n", - "Ping_Timeout=1\n", - "Wait_Between_Pings=2\n", - "Wait_for_Instance_Stop=60\n", - "Wait_for_Instance_Start=300\n", - "\n", - "# leverage AWS security credentials provided by EC2 roles\n", - "# Setup environment for ec2 api tools\n", - "export EC2_HOME=/opt/ec2-api-tools\n", - "export AWS_IAM_HOME=/opt/IAMCli\n", - "export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-amd64\n", - "PATH=/opt/ec2-api-tools/bin:$PATH\n", - "\n", - "# Determine the NAT instance private IP so we can ping the other NAT instance, take over\n", - "# its route, and reboot it. Requires EC2 DescribeInstances, ReplaceRoute, and Start/RebootInstances\n", - "# permissions. The following example EC2 Roles policy will authorize these commands:\n", - "# {\n", - "# \"Statement\": [\n", - "# {\n", - "# \"Action\": [\n", - "# \"ec2:DescribeInstances\",\n", - "# \"ec2:CreateRoute\",\n", - "# \"ec2:ReplaceRoute\",\n", - "# \"ec2:StartInstances\",\n", - "# \"ec2:StopInstances\"\n", - "# ],\n", - "# \"Effect\": \"Allow\",\n", - "# \"Resource\": \"*\"\n", - "# }\n", - "# ]\n", - "# }\n", - "\n", - "# Get the primary NAT instance's IP\n", - "PRIMARY_NAT_IP=`/opt/ec2-api-tools/bin/ec2-describe-instances $PRIMARY_NAT_ID -U $EC2_URL | grep PRIVATEIPADDRESS -m 1 | awk '{print $2;}'`\n", - "BACKUP_NAT_IP=`/opt/ec2-api-tools/bin/ec2-describe-instances $BACKUP_NAT_ID -U $EC2_URL | grep PRIVATEIPADDRESS -m 1 | awk '{print $2;}'`\n", - "\n", - "echo `date` \"-- Starting NAT monitor\"\n", - "\n", - "while [ . 
]; do\n", - " # Check the health of both instances.\n", - "\n", - " primary_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $PRIMARY_NAT_IP| grep time= | wc -l`\n", - "\n", - " if [ \"$primary_pingresult\" == \"0\" ]; then\n", - " backup_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $BACKUP_NAT_IP| grep time= | wc -l`\n", - " if [ \"$backup_pingresult\" == \"0\" ]; then\n", - " echo `date` \"-- Both NAT devices unreachable.\"\n", - " #TODO: Notify alert that both NATs are down.\n", - " else # Backup NAT is healthy.\n", - " # Set HEALTHY variables to unhealthy (0)\n", - " ROUTE_HEALTHY=0\n", - " NAT_HEALTHY=0\n", - " STOPPING_NAT=0\n", - " while [ \"$NAT_HEALTHY\" == \"0\" ]; do\n", - " # Primary NAT instance is unhealthy, loop while we try to fix it\n", - " if [ \"$ROUTE_HEALTHY\" == \"0\" ]; then\n", - " echo `date` \"-- NAT($PRIMARY_NAT_ID) heartbeat failed, using $BACKUP_NAT_ID for $NAT_RT_ID default route\"\n", - " /opt/ec2-api-tools/bin/ec2-replace-route $NAT_RT_ID -r 0.0.0.0/0 -i $BACKUP_NAT_ID -U $EC2_URL\n", - " ROUTE_HEALTHY=1\n", - " fi\n", - " # Check NAT state to see if we should stop it or start it again\n", - " NAT_STATE=`/opt/ec2-api-tools/bin/ec2-describe-instances $PRIMARY_NAT_ID -U $EC2_URL | grep INSTANCE | awk '{print $5;}'`\n", - " if [ \"$NAT_STATE\" == \"stopped\" ]; then\n", - " echo `date` \"-- NAT($PRIMARY_NAT_ID) instance stopped, starting it back up\"\n", - " /opt/ec2-api-tools/bin/ec2-start-instances $PRIMARY_NAT_ID -U $EC2_URL\n", - " sleep $Wait_for_Instance_Start\n", - " else\n", - " if [ \"$STOPPING_NAT\" == \"0\" ]; then\n", - " echo `date` \"-- NAT($PRIMARY_NAT_ID) instance $NAT_STATE, attempting to stop for reboot\"\n", - " /opt/ec2-api-tools/bin/ec2-stop-instances $PRIMARY_NAT_ID -U $EC2_URL\n", - " STOPPING_NAT=1\n", - " fi\n", - " sleep $Wait_for_Instance_Stop\n", - " fi\n", - " unhealthy_nat_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $PRIMARY_NAT_IP| grep time= | wc -l`\n", - " if [ \"$unhealthy_nat_pingresult\" == \"$Num_Pings\" ]; then\n", - " NAT_HEALTHY=1\n", - " fi\n", - " done\n", - "\n", - " # Backup NAT was healthy so we switched to it. 
It is now the primary.\n", - " if [ \"$ROUTE_HEALTHY\" == \"1\" ]; then\n", - " TEMP_NAT_ID=$PRIMARY_NAT_ID\n", - " TEMP_NAT_IP=$PRIMARY_NAT_IP\n", - "\n", - " PRIMARY_NAT_ID=$BACKUP_NAT_ID\n", - " PRIMARY_NAT_IP=$BACKUP_NAT_IP\n", - "\n", - " BACKUP_NAT_ID=$TEMP_NAT_ID\n", - " BACKUP_NAT_IP=$TEMP_NAT_IP\n", - " fi\n", - " fi\n", - " else\n", - " sleep $Wait_Between_Pings\n", - " fi\n", - "done\n", - "EOF\n", - "chmod u+x /opt/edx/bin/nat_monitor.sh\n", - "echo '@reboot /opt/edx/bin/nat_monitor.sh > /var/log/nat_monitor.log' | crontab\n", - "/opt/edx/bin/nat_monitor.sh > /var/log/nat_monitor.log &\n" - ]]}} - } - }, - "BastionSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable access to the Bastion host", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"icmp", - "FromPort":"-1", - "ToPort":"-1", - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "EdxappRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Principal": { - "Service": [ "ec2.amazonaws.com" ] - }, - "Action": [ "sts:AssumeRole" ] - } ] - }, - "Path": "/", - "Policies": [ { - "PolicyName": "EdxAppBasePolicy", - "PolicyDocument": { - "Statement":[ - { - "Effect":"Allow", - "Action":[ - "cloudformation:DescribeStackResource", - "s3:Put", - "ses:SendEmail", - "ses:SendRawEmail", - "ses:GetSendQuota" - ], - "Resource":"*" - } - ] - } - } ] - } - }, - "EdxappInstanceProfile": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "Path": "/", - "Roles": [ { - "Ref": "EdxappRole" - } ] - } - }, - "XqueueRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Principal": { - "Service": [ "ec2.amazonaws.com" ] - }, - "Action": [ "sts:AssumeRole" ] - } ] - }, - "Path": "/" - } - }, - "XqueueInstanceProfile": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "Path": "/", - "Roles": [ { - "Ref": "XqueueRole" - } ] - } - }, - "XServerRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Principal": { - "Service": [ "ec2.amazonaws.com" ] - }, - "Action": [ "sts:AssumeRole" ] - } ] - }, - "Path": "/" - } - }, - "XServerInstanceProfile": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "Path": "/", - "Roles": [ { - "Ref": "XServerRole" - } ] - } - }, - "AdminSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Admin Security Group", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - } - ] - } - }, - "EdxappServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "IamInstanceProfile":{ "Ref":"EdxappInstanceProfile" }, - "SecurityGroups":[ - { - "Ref":"EdxappServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - 
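The takeover step at the heart of the nat_monitor.sh script above is the ec2-replace-route call, which repoints the private route table's 0.0.0.0/0 at the surviving NAT instance; this is exactly what the ec2:ReplaceRoute permission in the NAT_Takeover policy exists to allow. A boto3 sketch of just that step (both IDs are placeholders):

```python
import boto3

ec2 = boto3.client("ec2")  # runs under the NATMonitorRole instance profile

def fail_over_default_route(route_table_id: str, backup_nat_id: str) -> None:
    """Send the private subnets' internet traffic through the backup NAT."""
    ec2.replace_route(
        RouteTableId=route_table_id,
        DestinationCidrBlock="0.0.0.0/0",
        InstanceId=backup_nat_id,   # NAT instances forward traffic themselves,
    )                               # hence SourceDestCheck=false above

# e.g. fail_over_default_route("rtb-0123456789abcdef0", "i-0123456789abcdef0")
```

After the route swap, the script stop/starts the failed instance and, once it answers pings again, swaps its PRIMARY/BACKUP variables instead of failing back, so the recovered instance becomes the new standby.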
"Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"EdxappInstanceType" - }, - "Arch" - ] - } - ] - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"EdxappInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - "Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "EdxappServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "EdxappSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "EdxappSubnet02", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"EdxappSubnet01" - }, - { - "Ref":"EdxappSubnet02" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"edxapp"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"edxapp", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"EdxappServer" - }, - "MinSize":{ - "Ref":"EdxappDesiredCapacity" - }, - "MaxSize":{ - "Ref":"EdxappDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"EdxappDesiredCapacity" - }, - "LoadBalancerNames":[ - { - "Ref":"EdxappELB" - } - ] - } - }, - "EdxappServerScaleUpPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"EdxappServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"1" - } - }, - "EdxappServerScaleDownPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"EdxappServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"-1" - } - }, - "EdxappCPUAlarmHigh":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-up if CPU > 90% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"90", - "AlarmActions":[ - { - "Ref":"EdxappServerScaleUpPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"EdxappServerASGroup" - } - } - ], - "ComparisonOperator":"GreaterThanThreshold" - } - }, - "EdxappCPUAlarmLow":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-down if CPU < 70% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"70", - "AlarmActions":[ - { - "Ref":"EdxappServerScaleDownPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"EdxappServerASGroup" - } - } - ], - "ComparisonOperator":"LessThanThreshold" - } - }, - "EdxappELB":{ - "Type":"AWS::ElasticLoadBalancing::LoadBalancer", - "Properties":{ - "LBCookieStickinessPolicy" : [{ - "PolicyName" : "EdxappStickinessPolicy", - "CookieExpirationPeriod" : "180" - } ], - "SecurityGroups":[ - { - "Ref":"EdxappELBSecurityGroup" - } - ], - "Listeners":[ - { - "LoadBalancerPort":"80", - "InstancePort":{ - "Ref":"EdxappServerPort" - }, - "Protocol":"HTTP" - }, - { - "LoadBalancerPort":"443", - "InstancePort":{ - "Ref":"EdxappServerPort" - }, - "Protocol":"HTTPS", - "InstanceProtocol":"HTTP", - "SSLCertificateId": { - "Ref": "SSLCertificateARN" - } - } - ], - 
"HealthCheck":{ - "Target": { "Fn::Join":[ "", - [ - "HTTP:", - { "Ref": "EdxappServerPort" }, - "/heartbeat" - ] - ]}, - "HealthyThreshold":"3", - "UnhealthyThreshold":"5", - "Interval":"30", - "Timeout":"5" - }, - "Subnets":[ - { - "Ref":"PublicSubnet01" - }, - { - "Ref":"PublicSubnet02" - } - ] - } - }, - "EdxappELBSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable HTTP access on port 80", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort":{ - "Ref":"EdxappServerPort" - }, - "ToPort":{ - "Ref":"EdxappServerPort" - }, - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "EdxappServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":{ - "Ref":"EdxappServerPort" - }, - "ToPort":{ - "Ref":"EdxappServerPort" - }, - "CidrIp":"0.0.0.0/0" - } - ], - "Tags":[ - { - "Key":"play", - "Value":"edxapp" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "XqueueServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "IamInstanceProfile":{ "Ref":"XqueueInstanceProfile" }, - "SecurityGroups":[ - { - "Ref":"XqueueServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"XqueueInstanceType" - }, - "Arch" - ] - } - ] - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"XqueueInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - "Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "XqueueServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "XqueueSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "XqueueSubnet02", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"XqueueSubnet01" - }, - { - "Ref":"XqueueSubnet02" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"xqueue"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"xqueue", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"XqueueServer" - }, - "MinSize":{ - "Ref":"XqueueDesiredCapacity" - }, - "MaxSize":{ - "Ref":"XqueueDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"XqueueDesiredCapacity" - }, - "LoadBalancerNames":[ - { - "Ref":"XqueueELB" - } - ] - } - }, - "XqueueScaleUpPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"XqueueServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"1" - } - }, - "XqueueScaleDownPolicy":{ - 
"Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"XqueueServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"-1" - } - }, - "XqueueCPUAlarmHigh":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-up if CPU > 90% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"90", - "AlarmActions":[ - { - "Ref":"XqueueScaleUpPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"XqueueServerASGroup" - } - } - ], - "ComparisonOperator":"GreaterThanThreshold" - } - }, - "XqueueCPUAlarmLow":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-down if CPU < 70% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"70", - "AlarmActions":[ - { - "Ref":"XqueueScaleDownPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"XqueueServerASGroup" - } - } - ], - "ComparisonOperator":"LessThanThreshold" - } - }, - "XqueueELB":{ - "Type":"AWS::ElasticLoadBalancing::LoadBalancer", - "Properties":{ - "SecurityGroups":[ - { - "Ref":"XqueueELBSecurityGroup" - } - ], - "Listeners":[ - { - "LoadBalancerPort":"80", - "InstancePort": { "Ref": "XqueueServerPort" }, - "Protocol":"HTTP" - }, - { - "LoadBalancerPort":"443", - "InstancePort": { "Ref": "XqueueServerPort" }, - "Protocol":"HTTPS", - "InstanceProtocol":"HTTP", - "SSLCertificateId": { - "Ref": "SSLCertificateARN" - } - } - ], - "HealthCheck":{ - "Target": { "Fn::Join":[ "", - [ - "HTTP:", - { "Ref": "XqueueServerPort" }, - "/xqueue/status/" - ] - ]}, - "HealthyThreshold":"3", - "UnhealthyThreshold":"5", - "Interval":"30", - "Timeout":"5" - }, - "Subnets":[ - { - "Ref":"PublicSubnet01" - }, - { - "Ref":"PublicSubnet02" - } - ] - } - }, - "XqueueELBSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable HTTP access on port 80", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "CidrIp":"0.0.0.0/0" - }, - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort": { "Ref": "XqueueServerPort" }, - "ToPort": { "Ref": "XqueueServerPort" }, - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "XqueueServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort": { "Ref": "XqueueServerPort" }, - "ToPort": { "Ref": "XqueueServerPort" }, - "CidrIp":"0.0.0.0/0" - } - ], - "Tags":[ - { - "Key":"play", - "Value":"xqueue" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "CommonClusterServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "SecurityGroups":[ - { - "Ref":"RabbitMQServerSecurityGroup" - }, - { - "Ref":"ElasticsearchServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - 
"Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"CommonClusterInstanceType" - }, - "Arch" - ] - } - ] - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"CommonClusterInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - "Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "CommonClusterServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "CommonClusterSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "CommonClusterSubnet02", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "CommonClusterSubnet03", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"CommonClusterSubnet01" - }, - { - "Ref":"CommonClusterSubnet02" - }, - { - "Ref":"CommonClusterSubnet03" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"commoncluster"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"commoncluster", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"CommonClusterServer" - }, - "MinSize":{ - "Ref":"CommonClusterDesiredCapacity" - }, - "MaxSize":{ - "Ref":"CommonClusterDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"CommonClusterDesiredCapacity" - }, - "LoadBalancerNames":[ - { - "Ref":"RabbitMQELB" - }, - { - "Ref":"ElasticSearchELB" - } - ] - } - }, - "CommonClusterCPUAlarmHigh":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Alarm if CPU > 90% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"90", - "AlarmActions":[], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"CommonClusterServerASGroup" - } - } - ], - "ComparisonOperator":"GreaterThanThreshold" - } - }, - "CommonClusterCPUAlarmLow":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Alarm if CPU < 70% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"70", - "AlarmActions":[], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"CommonClusterServerASGroup" - } - } - ], - "ComparisonOperator":"LessThanThreshold" - } - }, - "ElasticSearchELB":{ - "Type":"AWS::ElasticLoadBalancing::LoadBalancer", - "Properties":{ - "Scheme":"internal", - "SecurityGroups":[ - { - "Ref":"ElasticSearchELBSecurityGroup" - } - ], - "Listeners":[ - { - "LoadBalancerPort":"9200", - "InstancePort":"9200", - "Protocol":"TCP" - }, - { - "LoadBalancerPort":"9300", - "InstancePort":"9300", - "Protocol":"TCP" - } - ], - "HealthCheck":{ - "Target":"TCP:9200", - "HealthyThreshold":"3", - "UnhealthyThreshold":"5", - "Interval":"30", - "Timeout":"5" - }, - "Subnets":[ - { - "Ref":"CommonClusterSubnet01" - }, - { - "Ref":"CommonClusterSubnet02" - }, - { - "Ref":"CommonClusterSubnet03" - } - ] - } - }, - "ElasticSearchELBSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable TCP access on elasticsearch ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"9200", - 
"ToPort":"9200", - "SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"9300", - "ToPort":"9300", - "SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" } - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - } - ] - } - }, - "ElasticsearchServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "SourceSecurityGroupId": { "Ref": "ElasticSearchELBSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "SourceSecurityGroupId": { "Ref": "ElasticSearchELBSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9200, - "ToPort": 9200, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": 9300, - "ToPort": 9300, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - } - ] - } - }, - "RabbitMQELB":{ - 
"Type":"AWS::ElasticLoadBalancing::LoadBalancer", - "Properties":{ - "Scheme":"internal", - "SecurityGroups":[ - { - "Ref":"RabbitMQELBSecurityGroup" - } - ], - "Listeners":[ - { - "LoadBalancerPort":"5672", - "InstancePort":"5672", - "Protocol":"TCP" - }, - { - "LoadBalancerPort":"6163", - "InstancePort":"6163", - "Protocol":"TCP" - } - ], - "HealthCheck":{ - "Target":"TCP:5672", - "HealthyThreshold":"3", - "UnhealthyThreshold":"5", - "Interval":"30", - "Timeout":"5" - }, - "Subnets":[ - { - "Ref":"CommonClusterSubnet01" - }, - { - "Ref":"CommonClusterSubnet02" - }, - { - "Ref":"CommonClusterSubnet03" - } - ] - } - }, - "RabbitMQELBSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable TCP access on rabbit ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "SourceSecurityGroupId": { "Ref": "EdxappServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "SourceSecurityGroupId": { "Ref": "EdxappServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "SourceSecurityGroupId": { "Ref": "XqueueServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "SourceSecurityGroupId": { "Ref": "XqueueServerSecurityGroup" } - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - } - ] - } - }, - "RabbitMQServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "SourceSecurityGroupId" : { - "Ref" : "RabbitMQELBSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "SourceSecurityGroupId" : { - "Ref" : "RabbitMQELBSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "SourceSecurityGroupId" : { - "Ref" : "XqueueServerSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "SourceSecurityGroupId" : { - "Ref" : 
"XqueueServerSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"5672", - "ToPort":"5672", - "SourceSecurityGroupId" : { - "Ref" : "EdxappServerSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"6163", - "ToPort":"6163", - "SourceSecurityGroupId" : { - "Ref" : "EdxappServerSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"0", - "ToPort":"65535", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"0", - "ToPort":"65535", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"0", - "ToPort":"65535", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort":"15672", - "ToPort":"15672", - "SourceSecurityGroupId" : { - "Ref" : "BastionSecurityGroup" - } - } - ], - "Tags":[ - { - "Key":"play", - "Value":"rabbitmq" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "XServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "IamInstanceProfile":{ "Ref":"XServerInstanceProfile" }, - "SecurityGroups":[ - { - "Ref":"XServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"XserverInstanceType" - }, - "Arch" - ] - } - ] - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"XserverInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - "Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "XServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "XServerSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "XServerSubnet02", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"XServerSubnet01" - }, - { - "Ref":"XServerSubnet02" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"xserver"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"xserver", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"XServer" - }, - "MinSize":{ - "Ref":"XServerDesiredCapacity" - }, - "MaxSize":{ - "Ref":"XServerDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"XServerDesiredCapacity" - }, - "LoadBalancerNames":[ - { - "Ref":"XServerELB" - } - ] - } - }, - "XServerScaleUpPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"XServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"1" - } - }, - "XServerScaleDownPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"XServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"-1" - } - }, - "XServerCPUAlarmHigh":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-up if CPU > 90% 
for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"90", - "AlarmActions":[ - { - "Ref":"XServerScaleUpPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"XServerASGroup" - } - } - ], - "ComparisonOperator":"GreaterThanThreshold" - } - }, - "XServerCPUAlarmLow":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-down if CPU < 70% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"70", - "AlarmActions":[ - { - "Ref":"XServerScaleDownPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"XServerASGroup" - } - } - ], - "ComparisonOperator":"LessThanThreshold" - } - }, - "XServerELB":{ - "Type":"AWS::ElasticLoadBalancing::LoadBalancer", - "Properties":{ - "Scheme":"internal", - "SecurityGroups":[ - { - "Ref":"XServerELBSecurityGroup" - } - ], - "Listeners":[ - { - "LoadBalancerPort":"80", - "InstancePort":{ "Ref": "XserverServerPort" }, - "Protocol":"HTTP" - } - ], - "HealthCheck":{ - "Target": { "Fn::Join":[ "", - [ - "HTTP:", - { "Ref": "XserverServerPort" }, - "/" - ] - ]}, - "HealthyThreshold":"3", - "UnhealthyThreshold":"5", - "Interval":"30", - "Timeout":"5" - }, - "Subnets":[ - { - "Ref":"XServerSubnet01" - }, - { - "Ref":"XServerSubnet02" - } - ] - } - }, - "XServerELBSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable TCP access on xserver ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"80", - "ToPort":"80", - "SourceSecurityGroupId": { "Ref": "XqueueServerSecurityGroup" } - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort": {"Ref": "XserverServerPort"}, - "ToPort": {"Ref": "XserverServerPort"}, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "XServerJail01", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": {"Ref": "XserverServerPort"}, - "ToPort": {"Ref": "XserverServerPort"}, - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "XServerJail02", "CIDR"]}]]} - } - ] - } - }, - "XServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus XServer required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]} - }, - { - "IpProtocol":"tcp", - "FromPort": { "Ref": "XserverServerPort" }, - "ToPort": { "Ref": "XserverServerPort" }, - "SourceSecurityGroupId": { "Ref": "XServerELBSecurityGroup" } - } - ], - "Tags":[ - { - "Key":"play", - "Value":"xserver" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "EdxDataSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up access to the data subnet", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"3306", - "ToPort":"3306", - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "EdxDBSubnetGroup":{ - "Type":"AWS::RDS::DBSubnetGroup", - "Properties":{ - 
"DBSubnetGroupDescription":"Subnets available for the RDS DB Instance", - "SubnetIds":[ - { - "Ref":"Data01" - }, - { - "Ref":"Data02" - } - ] - } - }, - "DBSecurityGroup":{ - "Type":"AWS::RDS::DBSecurityGroup", - "Properties":{ - "EC2VpcId":{ - "Ref":"EdxVPC" - }, - "DBSecurityGroupIngress":[ - { - "EC2SecurityGroupId":{ - "Ref":"EdxappServerSecurityGroup" - } - }, - { - "EC2SecurityGroupId":{ - "Ref":"WorkerServerSecurityGroup" - } - }, - { - "EC2SecurityGroupId":{ - "Ref":"XqueueServerSecurityGroup" - } - } - ], - "GroupDescription":"Data access" - } - }, - "EdxDB":{ - "Type":"AWS::RDS::DBInstance", - "Properties":{ - "DBName":{ - "Ref":"DBName" - }, - "AllocatedStorage":{ - "Ref":"DBAllocatedStorage" - }, - "DBInstanceClass":{ - "Ref":"DBClass" - }, - "Engine":"MySQL", - "EngineVersion":{ "Ref": "DBEngineVersion" }, - "MasterUsername":{ - "Ref":"DBUsername" - }, - "MasterUserPassword":{ - "Ref":"DBPassword" - }, - "DBSubnetGroupName":{ - "Ref":"EdxDBSubnetGroup" - }, - "DBSecurityGroups":[ - { - "Ref":"DBSecurityGroup" - } - ], - "Tags":[ - { - "Key":"play", - "Value":"rds" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ], - "MultiAZ":"true" - } - }, - "CacheSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Access to the elastic cache cluster", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort": { "Ref":"CacheNodePort" }, - "ToPort": { "Ref":"CacheNodePort" }, - "SourceSecurityGroupId":{ - "Ref":"EdxappServerSecurityGroup" - } - }, - { - "IpProtocol":"tcp", - "FromPort": { "Ref":"CacheNodePort" }, - "ToPort": { "Ref":"CacheNodePort" }, - "SourceSecurityGroupId":{ - "Ref":"WorkerServerSecurityGroup" - } - } - ] - } - }, - "WorkerServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "SecurityGroups":[ - { - "Ref":"WorkerServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"WorkerInstanceType" - }, - "Arch" - ] - } - ] - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"WorkerInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - "Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "WorkerServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "WorkerSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "WorkerSubnet02", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"WorkerSubnet01" - }, - { - "Ref":"WorkerSubnet02" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"worker"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"worker", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"WorkerServer" - }, - "MinSize":{ - "Ref":"WorkerDesiredCapacity" - }, - "MaxSize":{ - "Ref":"WorkerDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"WorkerDesiredCapacity" - } - } - }, - "WorkerServerScaleUpPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - 
"AutoScalingGroupName":{ - "Ref":"WorkerServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"1" - } - }, - "WorkerServerScaleDownPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"WorkerServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"-1" - } - }, - "WorkerCPUAlarmHigh":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-up if CPU > 90% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"90", - "AlarmActions":[ - { - "Ref":"WorkerServerScaleUpPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"WorkerServerASGroup" - } - } - ], - "ComparisonOperator":"GreaterThanThreshold" - } - }, - "WorkerCPUAlarmLow":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-down if CPU < 70% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"70", - "AlarmActions":[ - { - "Ref":"WorkerServerScaleDownPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"WorkerServerASGroup" - } - } - ], - "ComparisonOperator":"LessThanThreshold" - } - }, - "WorkerServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - } - ], - "Tags":[ - { - "Key":"play", - "Value":"worker" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "ForumServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "SecurityGroups":[ - { - "Ref":"ForumServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"ForumInstanceType" - }, - "Arch" - ] - } - ] - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"ForumInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - "Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "ForumServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "ForumSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "ForumSubnet02", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"ForumSubnet01" - }, - { - "Ref":"ForumSubnet02" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"forum"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"forum", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"ForumServer" - }, - "MinSize":{ - "Ref":"ForumDesiredCapacity" - }, - "MaxSize":{ - "Ref":"ForumDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"ForumDesiredCapacity" - }, - "LoadBalancerNames":[ - { - "Ref":"ForumELB" - } - ] - } - 
}, - "ForumServerScaleUpPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"ForumServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"1" - } - }, - "ForumServerScaleDownPolicy":{ - "Type":"AWS::AutoScaling::ScalingPolicy", - "Properties":{ - "AdjustmentType":"ChangeInCapacity", - "AutoScalingGroupName":{ - "Ref":"ForumServerASGroup" - }, - "Cooldown":"60", - "ScalingAdjustment":"-1" - } - }, - "ForumCPUAlarmHigh":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-up if CPU > 90% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"90", - "AlarmActions":[ - { - "Ref":"ForumServerScaleUpPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"ForumServerASGroup" - } - } - ], - "ComparisonOperator":"GreaterThanThreshold" - } - }, - "ForumCPUAlarmLow":{ - "Type":"AWS::CloudWatch::Alarm", - "Properties":{ - "AlarmDescription":"Scale-down if CPU < 70% for 10 minutes", - "MetricName":"CPUUtilization", - "Namespace":"AWS/EC2", - "Statistic":"Average", - "Period":"300", - "EvaluationPeriods":"2", - "Threshold":"70", - "AlarmActions":[ - { - "Ref":"ForumServerScaleDownPolicy" - } - ], - "Dimensions":[ - { - "Name":"AutoScalingGroupName", - "Value":{ - "Ref":"ForumServerASGroup" - } - } - ], - "ComparisonOperator":"LessThanThreshold" - } - }, - "ForumELB":{ - "Type":"AWS::ElasticLoadBalancing::LoadBalancer", - "Properties":{ - "SecurityGroups":[ - { - "Ref":"ForumELBSecurityGroup" - } - ], - "Listeners":[ - { - "LoadBalancerPort":"443", - "InstancePort":{ - "Ref":"ForumServerPort" - }, - "Protocol":"HTTPS", - "InstanceProtocol":"HTTP", - "SSLCertificateId": { - "Ref": "SSLCertificateARN" - } - } - ], - "HealthCheck":{ - "Target":{"Fn::Join":["", - ["TCP:", - {"Ref":"ForumServerPort"} - ] - ] - }, - "HealthyThreshold":"3", - "UnhealthyThreshold":"5", - "Interval":"30", - "Timeout":"5" - }, - "Subnets":[ - { - "Ref":"PublicSubnet01" - }, - { - "Ref":"PublicSubnet02" - } - ] - } - }, - "ForumELBSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Enable HTTPS access", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"443", - "ToPort":"443", - "CidrIp":"0.0.0.0/0" - } - ], - "SecurityGroupEgress":[ - { - "IpProtocol":"tcp", - "FromPort": { "Ref": "ForumServerPort" }, - "ToPort": { "Ref": "ForumServerPort" }, - "CidrIp":"0.0.0.0/0" - } - ] - } - }, - "ForumServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort": { "Ref": "ForumServerPort" }, - "ToPort": { "Ref": "ForumServerPort" }, - "SourceSecurityGroupId" : { - "Ref" : "ForumELBSecurityGroup" - } - } - ], - "Tags":[ - { - "Key":"play", - "Value":"forum" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "MongoServer":{ - "Type":"AWS::AutoScaling::LaunchConfiguration", - "Properties":{ - "SecurityGroups":[ - { - "Ref":"MongoServerSecurityGroup" - } - ], - "ImageId":{ - "Fn::FindInMap":[ - 
"AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"MongoInstanceType" - }, - "Arch" - ] - } - ] - }, - "UserData":{ - "Fn::Base64":{ - "Fn::Join":[ - "", - [ - "#!/bin/bash -x\n", - "exec >> /home/ubuntu/cflog.log\n", - "exec 2>> /home/ubuntu/cflog.log\n", - "function error_exit\n", - "{\n", - " cfn-signal -e 1 -r \"$1\" '", - { - "Ref":"MongoServerWaitHandle" - }, - "'\n", - " exit 1\n", - "}\n", - "apt-get -y update\n", - "apt-get -y install python-setuptools\n", - "echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n", - "easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n", - "echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n", - - "#Install lvm2 so we can create logical volumes.\n", - "apt-get -y install lvm2 mdadm\n", - - "## Waiting for EBS mounts to become available and set their read ahead.\n", - "for device in xvdh xvdi xvdj xvdk; do\n", - " while [ ! -e /dev/$device ]; do echo waiting for /dev/$device to attach; sleep 10; done\n", - " blockdev --setra 128 /dev/$device\n", - "done\n", - - "## Create RAID10 and persist configuration\n", - "mdadm --verbose --create /dev/md0 --level=10 --chunk=256 --raid-devices=4 /dev/xvdh /dev/xvdi /dev/xvdj /dev/xvdk | tee /tmp/mdadm.log 2>&1\n", - "echo '`mdadm --detail --scan`' | tee -a /etc/mdadm.conf\n", - - "## Set read-ahead on the new device\n", - "blockdev --setra 128 /dev/md0\n", - - "## Create physical and logical volumes\n", - "dd if=/dev/zero of=/dev/md0 bs=512 count=1\n", - "pvcreate /dev/md0\n", - "vgcreate vg0 /dev/md0\n", - "lvcreate -l 90%vg -n data vg0\n", - "lvcreate -l 5%vg -n log vg0\n", - "lvcreate -l 5%vg -n journal vg0\n", - - "## Create filesystems and mount point info\n", - "mke2fs -t ext4 -F /dev/vg0/data > /tmp/mke2fs1.log 2>&1\n", - "mke2fs -t ext4 -F /dev/vg0/log > /tmp/mke2fs2.log 2>&1\n", - "mke2fs -t ext4 -F /dev/vg0/journal > /tmp/mke2fs3.log 2>&1\n", - - "mkdir -p /edx/var/mongo/data\n", - "mkdir -p /edx/var/log/mongo\n", - "mkdir -p /edx/var/mongo/journal\n", - - "echo '/dev/vg0/data /edx/var/mongo/data ext4 defaults,auto,noatime,noexec 0 0' | tee -a /etc/fstab\n", - "echo '/dev/vg0/log /edx/var/log/mongo ext4 defaults,auto,noatime,noexec 0 0' | tee -a /etc/fstab\n", - "echo '/dev/vg0/journal /edx/var/mongo/journal ext4 defaults,auto,noatime,noexec 0 0' | tee -a /etc/fstab\n", - "mount /edx/var/mongo/data > /tmp/mount1.log 2>&1\n", - "mount /edx/var/log/mongo > /tmp/mount2.log 2>&1\n", - "mount /edx/var/mongo/journal > /tmp/mount3.log 2>&1\n", - - "ln -s /edx/var/mongo/journal /edx/var/mongo/data/journal\n", - - "# If all went well, signal success\n", - "cfn-signal -e $? 
-r 'Edx Mongo configuration' '", - { - "Ref":"MongoServerWaitHandle" - }, - "'\n" - ] - ] - } - }, - "KeyName":{ - "Ref":"KeyName" - }, - "InstanceType":{ - "Ref":"MongoInstanceType" - }, - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/xvdh", - "Ebs":{ - "VolumeSize": { "Ref":"MongoVolumeSize" } - } - }, - { - "DeviceName":"/dev/xvdi", - "Ebs":{ - "VolumeSize": { "Ref":"MongoVolumeSize" } - } - }, - { - "DeviceName":"/dev/xvdj", - "Ebs":{ - "VolumeSize": { "Ref":"MongoVolumeSize" } - } - }, - { - "DeviceName":"/dev/xvdk", - "Ebs":{ - "VolumeSize": { "Ref":"MongoVolumeSize" } - } - } - ] - } - }, - "MongoServerASGroup":{ - "Type":"AWS::AutoScaling::AutoScalingGroup", - "Properties":{ - "AvailabilityZones":[ - { - "Fn::GetAtt":[ - "MongoSubnet01", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "MongoSubnet02", - "AvailabilityZone" - ] - }, - { - "Fn::GetAtt":[ - "MongoSubnet03", - "AvailabilityZone" - ] - } - ], - "VPCZoneIdentifier":[ - { - "Ref":"MongoSubnet01" - }, - { - "Ref":"MongoSubnet02" - }, - { - "Ref":"MongoSubnet03" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"mongo"]]}, - "PropagateAtLaunch":true - }, - { - "Key":"play", - "Value":"mongo", - "PropagateAtLaunch":true - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - }, - "PropagateAtLaunch":true - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - }, - "PropagateAtLaunch":true - } - ], - "LaunchConfigurationName":{ - "Ref":"MongoServer" - }, - "MinSize":{ - "Ref":"MongoDesiredCapacity" - }, - "MaxSize":{ - "Ref":"MongoDesiredCapacity" - }, - "DesiredCapacity":{ - "Ref":"MongoDesiredCapacity" - } - } - }, - "MongoServerSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Open up SSH access plus Edx Server required ports", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - }, - { - "IpProtocol":"tcp", - "FromPort":"27017", - "ToPort":"27017", - "SourceSecurityGroupId": { "Ref": "EdxappServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"28017", - "ToPort":"28017", - "SourceSecurityGroupId": { "Ref": "EdxappServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"27017", - "ToPort":"27017", - "SourceSecurityGroupId": { "Ref": "WorkerServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"28017", - "ToPort":"28017", - "SourceSecurityGroupId": { "Ref": "WorkerServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"27017", - "ToPort":"27017", - "SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"28017", - "ToPort":"28017", - "SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" } - }, - { - "IpProtocol":"tcp", - "FromPort":"27017", - "ToPort":"27017", - "CidrIp":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo01", - "CIDR" - ]} - ]] - } - }, - { - "IpProtocol":"tcp", - "FromPort":"28017", - "ToPort":"28017", - "CidrIp":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo01", - "CIDR" - ]} - ]] - } - }, - { - "IpProtocol":"tcp", - "FromPort":"27017", - "ToPort":"27017", - "CidrIp":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo02", - "CIDR" - ]} - ]] - } - }, - { - "IpProtocol":"tcp", - "FromPort":"28017", - 
"ToPort":"28017", - "CidrIp":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo02", - "CIDR" - ]} - ]] - } - }, - { - "IpProtocol":"tcp", - "FromPort":"27017", - "ToPort":"27017", - "CidrIp":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo03", - "CIDR" - ]} - ]] - } - }, - { - "IpProtocol":"tcp", - "FromPort":"28017", - "ToPort":"28017", - "CidrIp":{ - "Fn::Join": ["", [ - "10.", { "Ref": "ClassB"}, - {"Fn::FindInMap":[ - "SubnetConfig", - "Mongo03", - "CIDR" - ]} - ]] - } - } - ], - "Tags":[ - { - "Key":"play", - "Value":"mongo" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ] - } - }, - "MongoServerWaitHandle":{ - "Type":"AWS::CloudFormation::WaitConditionHandle" - }, - "MongoServerWaitCondition":{ - "Type":"AWS::CloudFormation::WaitCondition", - "DependsOn":"MongoServer", - "Properties":{ - "Handle":{ - "Ref":"MongoServerWaitHandle" - }, - "Timeout":"2400" - } - }, - "CacheSubnetGroup" : { - "Type" : "AWS::ElastiCache::SubnetGroup", - "Properties" : { - "Description" : "Cache Subnet Group", - "SubnetIds" : [ { "Ref" : "Cache01" }, { "Ref" : "Cache02" } ] - } - }, - "CacheCluster" : { - "Type": "AWS::ElastiCache::CacheCluster", - "Properties": { - "NumCacheNodes" : { "Ref" : "NumberOfCacheNodes" }, - "CacheNodeType" : { "Ref" : "CacheNodeType" }, - "Engine" : "memcached", - "EngineVersion": "1.4.5", - "Port": { "Ref": "CacheNodePort" }, - "PreferredAvailabilityZone": { "Fn::FindInMap":[ - "MapRegionsToAvailZones", - { "Ref":"AWS::Region" }, - "AZone0" - ] - }, - "CacheParameterGroupName": "default.memcached1.4", - "AutoMinorVersionUpgrade": true, - "CacheSubnetGroupName" : { "Ref" : "CacheSubnetGroup" }, - "VpcSecurityGroupIds" : [ { "Ref" : "CacheSecurityGroup" } ] - } - }, - "NotifierRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ { - "Effect": "Allow", - "Principal": { - "Service": [ "ec2.amazonaws.com" ] - }, - "Action": [ "sts:AssumeRole" ] - } ] - }, - "Path": "/", - "Policies": [ { - "PolicyName": "NotifierBasePolicy", - "PolicyDocument": { - "Statement":[ - { - "Effect":"Allow", - "Action":[ - "cloudformation:DescribeStackResource", - "s3:Put", - "ses:SendEmail", - "ses:SendRawEmail", - "ses:GetSendQuota" - ], - "Resource":"*" - } - ] - } - } ] - } - }, - "NotifierInstanceProfile": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "Path": "/", - "Roles": [ { - "Ref": "NotifierRole" - } ] - } - }, - "NotifierHost":{ - "Type":"AWS::EC2::Instance", - "Properties":{ - "InstanceType":{ - "Ref":"NotifierInstanceType" - }, - "KeyName":{ - "Ref":"KeyName" - }, - "IamInstanceProfile" : { - "Ref" : "NotifierInstanceProfile" - }, - "SubnetId":{ - "Ref":"NotifierSubnet01" - }, - "ImageId":{ - "Fn::FindInMap":[ - "AWSRegionArch2AMI", - { - "Ref":"AWS::Region" - }, - { - "Fn::FindInMap":[ - "AWSInstanceType2Arch", - { - "Ref":"NotifierInstanceType" - }, - "Arch" - ] - } - ] - }, - "SecurityGroupIds":[ - { - "Ref":"NotifierSecurityGroup" - } - ], - "Tags":[ - { - "Key":"Name", - "Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"notifier"]]} - }, - { - "Key":"play", - "Value":"notifier" - }, - { - "Key":"environment", - "Value":{ - "Ref":"EnvironmentTag" - } - }, - { - "Key":"deployment", - "Value":{ - "Ref":"DeploymentTag" - } - } - ], - "BlockDeviceMappings":[ - { - "DeviceName":"/dev/sda1", - 
"Ebs":{ - "VolumeSize":"100" - } - } - ] - } - }, - "NotifierSecurityGroup":{ - "Type":"AWS::EC2::SecurityGroup", - "Properties":{ - "GroupDescription":"Notifier Security Group", - "VpcId":{ - "Ref":"EdxVPC" - }, - "SecurityGroupIngress":[ - { - "IpProtocol":"tcp", - "FromPort":"22", - "ToPort":"22", - "CidrIp":{ - "Ref":"SSHLocation" - } - } - ] - } - } - }, - "Outputs":{ - "EdxSecurityGroup":{ - "Description":"EC2 Security Group with access to the Edx server", - "Value":{ - "Ref":"EdxappServerSecurityGroup" - } - } - } -} diff --git a/cloudformation_templates/edx-server-multi-instance.json b/cloudformation_templates/edx-server-multi-instance.json deleted file mode 100644 index 29ef895c149..00000000000 --- a/cloudformation_templates/edx-server-multi-instance.json +++ /dev/null @@ -1,293 +0,0 @@ -{ - "AWSTemplateFormatVersion": "2010-09-09", - - "Description": "Sample template to bring up an Edx Server. A WaitCondition is used to hold up the stack creation until the application is deployed. **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters": { - - "GroupTag": { - "Type": "String", - "Description": "Group Tag" - }, - "KeyName": { - "Type": "String", - "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server" - }, - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - "SSHLocation" : { - "Description" : "The IP address range that can be used to SSH to the EC2 instances", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 
- }, - "WebServerPort" : { - "Description" : "The TCP port for the Web Server", - "Type" : "Number", - "Default" : "8888" - } - }, - - "Mappings" : { - - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "64" }, - "m1.small" : { "Arch" : "64" }, - "m1.medium" : { "Arch" : "64" }, - "m1.large" : { "Arch" : "64" }, - "m1.xlarge" : { "Arch" : "64" }, - "m2.xlarge" : { "Arch" : "64" }, - "m2.2xlarge" : { "Arch" : "64" }, - "m2.4xlarge" : { "Arch" : "64" }, - "m3.xlarge" : { "Arch" : "64" }, - "m3.2xlarge" : { "Arch" : "64" }, - "c1.medium" : { "Arch" : "64" }, - "c1.xlarge" : { "Arch" : "64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "32" : "ami-def89fb7", "64" : "ami-d0f89fb9" }, - "us-west-1" : { "32" : "ami-fc002cb9", "64" : "ami-fe002cbb" }, - "us-west-2" : { "32" : "ami-0ef96e3e", "64" : "ami-70f96e40" }, - "eu-west-1" : { "32" : "ami-c27b6fb6", "64" : "ami-ce7b6fba" }, - "sa-east-1" : { "32" : "ami-a1da00bc", "64" : "ami-a3da00be" }, - "ap-southeast-1" : { "32" : "ami-66084734", "64" : "ami-64084736" }, - "ap-southeast-2" : { "32" : "ami-06ea7a3c", "64" : "ami-04ea7a3e" }, - "ap-northeast-1" : { "32" : "ami-fc6ceefd", "64" : "ami-fe6ceeff" } - } - }, - - "Resources" : { - - "WebServerGroup" : { - "Type" : "AWS::AutoScaling::AutoScalingGroup", - "Properties" : { - "Tags" : [ { - "Key" : "Group", - "Value" : { "Ref": "GroupTag" }, - "PropagateAtLaunch" : true - } ], - "AvailabilityZones" : { "Fn::GetAZs" : ""}, - "LaunchConfigurationName" : { "Ref" : "EdxServer" }, - "MinSize" : "2", - "MaxSize" : "2", - "LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ] - } - }, - - "WebServerScaleUpPolicy" : { - "Type" : "AWS::AutoScaling::ScalingPolicy", - "Properties" : { - "AdjustmentType" : "ChangeInCapacity", - "AutoScalingGroupName" : { "Ref" : "WebServerGroup" }, - "Cooldown" : "60", - "ScalingAdjustment" : "1" - } - }, - - "WebServerScaleDownPolicy" : { - "Type" : "AWS::AutoScaling::ScalingPolicy", - "Properties" : { - "AdjustmentType" : "ChangeInCapacity", - "AutoScalingGroupName" : { "Ref" : "WebServerGroup" }, - "Cooldown" : "60", - "ScalingAdjustment" : "-1" - } - }, - - "CPUAlarmHigh": { - "Type": "AWS::CloudWatch::Alarm", - "Properties": { - "AlarmDescription": "Scale-up if CPU > 90% for 10 minutes", - "MetricName": "CPUUtilization", - "Namespace": "AWS/EC2", - "Statistic": "Average", - "Period": "300", - "EvaluationPeriods": "2", - "Threshold": "90", - "AlarmActions": [ { "Ref": "WebServerScaleUpPolicy" } ], - "Dimensions": [ - { - "Name": "AutoScalingGroupName", - "Value": { "Ref": "WebServerGroup" } - } - ], - "ComparisonOperator": "GreaterThanThreshold" - } - }, - - "CPUAlarmLow": { - "Type": "AWS::CloudWatch::Alarm", - "Properties": { - "AlarmDescription": "Scale-down if CPU < 70% for 10 minutes", - "MetricName": "CPUUtilization", - "Namespace": "AWS/EC2", - "Statistic": "Average", - "Period": "300", - "EvaluationPeriods": "2", - "Threshold": "70", - "AlarmActions": [ { "Ref": "WebServerScaleDownPolicy" } ], - "Dimensions": [ - { - "Name": "AutoScalingGroupName", - "Value": { "Ref": "WebServerGroup" } - } - ], - "ComparisonOperator": "LessThanThreshold" - } - }, - - "ElasticLoadBalancer" : { - "Type" : "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties" : { - "AvailabilityZones" : { "Fn::GetAZs" : "" }, - "Listeners" : [ { - "LoadBalancerPort" : "80", - "InstancePort" : { "Ref" : "WebServerPort" }, - "Protocol" : "HTTP" - } ], - "HealthCheck" : { - "Target" : "TCP:22", - "HealthyThreshold" : "3", - "UnhealthyThreshold" : "5", - "Interval" : "30", - "Timeout" 
: "5" - } - } - }, - "InstanceSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Enable SSH access and HTTP from the load balancer only", - "SecurityGroupIngress" : [ { - "IpProtocol" : "tcp", - "FromPort" : "22", - "ToPort" : "22", - "CidrIp" : { "Ref" : "SSHLocation"} - }, - { - "IpProtocol" : "tcp", - "FromPort" : { "Ref" : "WebServerPort" }, - "ToPort" : { "Ref" : "WebServerPort" }, - "SourceSecurityGroupOwnerId" : {"Fn::GetAtt" : ["ElasticLoadBalancer", "SourceSecurityGroup.OwnerAlias"]}, - "SourceSecurityGroupName" : {"Fn::GetAtt" : ["ElasticLoadBalancer", "SourceSecurityGroup.GroupName"]} - } ] - } - }, - - "EdxServerUser" : { - "Type" : "AWS::IAM::User", - "Properties" : { - "Path": "/", - "Policies": [{ - "PolicyName": "root", - "PolicyDocument": { "Statement":[{ - "Effect":"Allow", - "Action": [ - "cloudformation:DescribeStackResource", - "s3:Put" - ], - "Resource":"*" - }]} - }] - } - }, - - "HostKeys" : { - "Type" : "AWS::IAM::AccessKey", - "Properties" : { - "UserName" : {"Ref": "EdxServerUser"} - } - }, - - "EdxServer": { - "Type": "AWS::AutoScaling::LaunchConfiguration", - "Metadata" : { - "AWS::CloudFormation::Init" : { - "config" : { - "files" : { - "/home/ubuntu/.s3cfg" : { - "content" : { "Fn::Join" : ["", [ - "[default]\n", - "access_key = ", { "Ref" : "HostKeys" }, "\n", - "secret_key = ", {"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]}, "\n", - "use_https = True\n" - ]]}, - "mode" : "000644", - "owner" : "ubuntu", - "group" : "ubuntu" - } - } - } - } - }, - "Properties": { - "SecurityGroups": [ { "Ref": "EdxServerSecurityGroup" } ], - "ImageId": { "Fn::FindInMap": [ "AWSRegionArch2AMI", { "Ref": "AWS::Region" }, { "Fn::FindInMap": [ "AWSInstanceType2Arch", { "Ref": "InstanceType" }, "Arch" ] } ] - }, - "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ - "#!/bin/bash\n", - "exec >> /home/ubuntu/cflog.log\n", - "exec 2>> /home/ubuntu/cflog.log\n", - "function error_exit\n", - "{\n", - " cfn-signal -e 1 -r \"$1\" '", { "Ref" : "EdxServerWaitHandle" }, "'\n", - " exit 1\n", - "}\n", - "apt-get -y update\n", - "apt-get -y install python-setuptools\n", - "echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n", - "easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n", - "echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n", - "cfn-init --region ", { "Ref" : "AWS::Region" }, - " -s ", { "Ref" : "AWS::StackName" }, " -r EdxServer ", - " --access-key ", { "Ref" : "HostKeys" }, - " --secret-key ", {"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]}, " || error_exit 'Failed to run cfn-init'\n", - "echo \"cfn-init run \" - `date` >> /home/ubuntu/cflog.txt\n", - "# If all went well, signal success\n", - "cfn-signal -e $? 
-r 'Edx Server configuration' '", { "Ref" : "EdxServerWaitHandle" }, "'\n" - ]]}}, - "KeyName": { "Ref": "KeyName" }, - "InstanceType": { "Ref": "InstanceType" } - } - }, - "EdxServerSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Open up SSH access plus Edx Server required ports", - "SecurityGroupIngress" : [ - { "IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": { "Ref" : "SSHLocation"} }, - { "IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0"}, - { "IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0"} - ] - } - }, - "EdxServerWaitHandle" : { - "Type" : "AWS::CloudFormation::WaitConditionHandle" - }, - - "EdxServerWaitCondition" : { - "Type" : "AWS::CloudFormation::WaitCondition", - "DependsOn" : "EdxServer", - "Properties" : { - "Handle" : { "Ref" : "EdxServerWaitHandle" }, - "Timeout" : "1200" - } - } - }, - - "Outputs" : { - "EdxSecurityGroup" : { - "Description" : "EC2 Security Group with access to the Edx server", - "Value" : { "Ref" :"EdxServerSecurityGroup" } - } - } -} diff --git a/cloudformation_templates/edx-server-single-instance.json b/cloudformation_templates/edx-server-single-instance.json deleted file mode 100644 index b0ab55e2653..00000000000 --- a/cloudformation_templates/edx-server-single-instance.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "AWSTemplateFormatVersion": "2010-09-09", - - "Description": "Sample template to bring up an Edx Server. A WaitCondition is used to hold up the stack creation until the application is deployed. **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters": { - - "NameTag": { - "Type": "String", - "Description": "Name Tag" - }, - "GroupTag": { - "Type": "String", - "Description": "Group Tag" - }, - "KeyName": { - "Type": "String", - "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server" - }, - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - "SSHLocation" : { - "Description" : "The IP address range that can be used to SSH to the EC2 instances", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 
- } - }, - - "Mappings" : { - - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "64" }, - "m1.small" : { "Arch" : "64" }, - "m1.medium" : { "Arch" : "64" }, - "m1.large" : { "Arch" : "64" }, - "m1.xlarge" : { "Arch" : "64" }, - "m2.xlarge" : { "Arch" : "64" }, - "m2.2xlarge" : { "Arch" : "64" }, - "m2.4xlarge" : { "Arch" : "64" }, - "m3.xlarge" : { "Arch" : "64" }, - "m3.2xlarge" : { "Arch" : "64" }, - "c1.medium" : { "Arch" : "64" }, - "c1.xlarge" : { "Arch" : "64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "32" : "ami-def89fb7", "64" : "ami-d0f89fb9" }, - "us-west-1" : { "32" : "ami-fc002cb9", "64" : "ami-fe002cbb" }, - "us-west-2" : { "32" : "ami-0ef96e3e", "64" : "ami-70f96e40" }, - "eu-west-1" : { "32" : "ami-c27b6fb6", "64" : "ami-ce7b6fba" }, - "sa-east-1" : { "32" : "ami-a1da00bc", "64" : "ami-a3da00be" }, - "ap-southeast-1" : { "32" : "ami-66084734", "64" : "ami-64084736" }, - "ap-southeast-2" : { "32" : "ami-06ea7a3c", "64" : "ami-04ea7a3e" }, - "ap-northeast-1" : { "32" : "ami-fc6ceefd", "64" : "ami-fe6ceeff" } - } - }, - - "Resources" : { - - "EdxServerUser" : { - "Type" : "AWS::IAM::User", - "Properties" : { - "Path": "/", - "Policies": [{ - "PolicyName": "root", - "PolicyDocument": { "Statement":[{ - "Effect":"Allow", - "Action": [ - "cloudformation:DescribeStackResource", - "s3:Put" - ], - "Resource":"*" - }]} - }] - } - }, - - "HostKeys" : { - "Type" : "AWS::IAM::AccessKey", - "Properties" : { - "UserName" : {"Ref": "EdxServerUser"} - } - }, - - "EdxServer": { - "Type": "AWS::EC2::Instance", - "Metadata" : { - "AWS::CloudFormation::Init" : { - "config" : { - "files" : { - "/home/ubuntu/.s3cfg" : { - "content" : { "Fn::Join" : ["", [ - "[default]\n", - "access_key = ", { "Ref" : "HostKeys" }, "\n", - "secret_key = ", {"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]}, "\n", - "use_https = True\n" - ]]}, - "mode" : "000644", - "owner" : "ubuntu", - "group" : "ubuntu" - } - } - } - } - }, - "Properties": { - "Tags" : [ { - "Key" : "Name", - "Value" :{ "Ref": "NameTag" } - }, - { - "Key" : "Group", - "Value" : { "Ref": "GroupTag" } - } - ], - "SecurityGroups": [ { "Ref": "EdxServerSecurityGroup" } ], - "ImageId": { "Fn::FindInMap": [ "AWSRegionArch2AMI", { "Ref": "AWS::Region" }, { "Fn::FindInMap": [ "AWSInstanceType2Arch", { "Ref": "InstanceType" }, "Arch" ] } ] - }, - "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ - "#!/bin/bash\n", - "function error_exit\n", - "{\n", - " cfn-signal -e 1 -r \"$1\" '", { "Ref" : "EdxServerWaitHandle" }, "'\n", - " exit 1\n", - "}\n", - - "apt-get -y install python-setuptools\n", - "echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n", - "easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n", - "echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n", - "cfn-init --region ", { "Ref" : "AWS::Region" }, - " -s ", { "Ref" : "AWS::StackId" }, " -r EdxServer ", - " --access-key ", { "Ref" : "HostKeys" }, - " --secret-key ", {"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]}, " || error_exit 'Failed to run cfn-init'\n", - "echo \"cfn-init run \" - `date` >> /home/ubuntu/cflog.txt\n", - "# If all went well, signal success\n", - "cfn-signal -e $? 
-r 'Edx Server configuration' '", { "Ref" : "EdxServerWaitHandle" }, "'\n" - ]]}}, - "KeyName": { "Ref": "KeyName" }, - "InstanceType": { "Ref": "InstanceType" } - } - }, - - "EdxServerSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Open up SSH access plus Edx Server required ports", - "SecurityGroupIngress" : [ - { "IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": { "Ref" : "SSHLocation"} }, - { "IpProtocol": "tcp", "FromPort": "4000", "ToPort": "4000", "SourceSecurityGroupName": { "Ref" :"EdxClientSecurityGroup" }}, - { "IpProtocol": "tcp", "FromPort": "4040", "ToPort": "4040", "CidrIp": "0.0.0.0/0"} - ] - } - }, - - "EdxClientSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Group with access to Edx Server" - } - }, - - "EdxServerWaitHandle" : { - "Type" : "AWS::CloudFormation::WaitConditionHandle" - }, - - "EdxServerWaitCondition" : { - "Type" : "AWS::CloudFormation::WaitCondition", - "DependsOn" : "EdxServer", - "Properties" : { - "Handle" : { "Ref" : "EdxServerWaitHandle" }, - "Timeout" : "1200" - } - } - }, - - "Outputs" : { - "EdxSecurityGroup" : { - "Description" : "EC2 Security Group with access to the Edx server", - "Value" : { "Ref" :"EdxClientSecurityGroup" } - } - } -} diff --git a/cloudformation_templates/examples/EC2_Instance_With_Block_Device_Mapping.json b/cloudformation_templates/examples/EC2_Instance_With_Block_Device_Mapping.json deleted file mode 100644 index b0ddb6951a3..00000000000 --- a/cloudformation_templates/examples/EC2_Instance_With_Block_Device_Mapping.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "AWSTemplateFormatVersion" : "2010-09-09", - - "Description" : "AWS CloudFormation Sample Template EC2_Instance_With_Block_Device_Mapping: Example to show how to attach EBS volumes and modify the root device using EC2 block device mappings. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters" : { - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m3.xlarge","m3.2xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge","hi1.4xlarge","hs1.8xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - - "KeyName" : { - "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server", - "Type" : "String" - }, - - "SSHFrom": { - "Description": "Lockdown SSH access to the bastion host (default can be accessed from anywhere)", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid CIDR range of the form x.x.x.x/x." 
- } - }, - - "Mappings" : { - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "PV64" }, - - "m1.small" : { "Arch" : "PV64" }, - "m1.medium" : { "Arch" : "PV64" }, - "m1.large" : { "Arch" : "PV64" }, - "m1.xlarge" : { "Arch" : "PV64" }, - - "m3.xlarge" : { "Arch" : "PV64" }, - "m3.2xlarge" : { "Arch" : "PV64" }, - - "m2.xlarge" : { "Arch" : "PV64" }, - "m2.2xlarge" : { "Arch" : "PV64" }, - "m2.4xlarge" : { "Arch" : "PV64" }, - - "c1.medium" : { "Arch" : "PV64" }, - "c1.xlarge" : { "Arch" : "PV64" }, - - "cc1.4xlarge" : { "Arch" : "CLU64" }, - "cc2.8xlarge" : { "Arch" : "CLU64" }, - - "cg1.4xlarge" : { "Arch" : "GPU64" }, - - "hi1.4xlarge" : { "Arch" : "PV64" }, - - "hs1.8xlarge" : { "Arch" : "PV64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "PV64" : "ami-3c994355", "CLU64" : "ami-08249861", "GPU64" : "ami-02f54a6b" }, - "us-west-2" : { "PV64" : "ami-20800c10", "CLU64" : "ami-2431bf14", "GPU64" : "NOT_YET_SUPPORTED" }, - "us-west-1" : { "PV64" : "ami-87712ac2", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "eu-west-1" : { "PV64" : "ami-c37474b7", "CLU64" : "ami-d97474ad", "GPU64" : "ami-1b02026f" }, - "ap-southeast-1" : { "PV64" : "ami-a6a7e7f4", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-southeast-2" : { "PV64" : "ami-bd990e87", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-northeast-1" : { "PV64" : "ami-4e6cd34f", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "sa-east-1" : { "PV64" : "ami-1e08d103", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" } - } - }, - - "Resources" : { - "Ec2Instance" : { - "Type" : "AWS::EC2::Instance", - "Properties" : { - "ImageId" : { "Fn::FindInMap" : [ "AWSRegionArch2AMI", { "Ref" : "AWS::Region" }, - { "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : "InstanceType" }, "Arch" ] } ] }, - "KeyName" : { "Ref" : "KeyName" }, - "InstanceType" : { "Ref" : "InstanceType" }, - "SecurityGroups" : [{ "Ref" : "Ec2SecurityGroup" }], - "BlockDeviceMappings" : [ - { - "DeviceName" : "/dev/sda1", - "Ebs" : { "VolumeSize" : "50" } - },{ - "DeviceName" : "/dev/sdm", - "Ebs" : { "VolumeSize" : "100" } - } - ] - } - }, - - "Ec2SecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "HTTP and SSH access", - "SecurityGroupIngress" : [ { - "IpProtocol" : "tcp", - "FromPort" : "22", "ToPort" : "22", - "CidrIp" : { "Ref" : "SSHFrom" } - } ] - } - } - }, - - "Outputs" : { - "Instance" : { - "Value" : { "Fn::GetAtt" : [ "Ec2Instance", "PublicDnsName" ] }, - "Description" : "DNS Name of the newly created EC2 instance" - } - } -} diff --git a/cloudformation_templates/examples/ElastiCache.json b/cloudformation_templates/examples/ElastiCache.json deleted file mode 100644 index 868a9092c67..00000000000 --- a/cloudformation_templates/examples/ElastiCache.json +++ /dev/null @@ -1,235 +0,0 @@ -{ - "AWSTemplateFormatVersion" : "2010-09-09", - - "Description" : "AWS CloudFormation Sample Template ElastiCache: Sample template showing how to create an Amazon ElastiCache Cache Cluster with Auto Discovery and access it from a very simple PHP application. **WARNING** This template creates an Amazon Ec2 Instance and an Amazon ElastiCache Cluster. 
You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters" : { - - "KeyName" : { - "Description" : "Name of an existing Amazon EC2 KeyPair for SSH access to the Web Server", - "Type" : "String" - }, - - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge", "m3.xlarge", "m3.2xlarge", "m2.xlarge","m2.2xlarge","m2.4xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge", "hi1.4xlarge", "hs1.8xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - - "CacheNodeType" : { - "Default" : "cache.m1.small", - "Description" : "The compute and memory capacity of the nodes in the Cache Cluster", - "Type" : "String", - "AllowedValues" : [ "cache.m1.small", "cache.m1.large", "cache.m1.xlarge", "cache.m2.xlarge", "cache.m2.2xlarge", "cache.m2.4xlarge", "cache.c1.xlarge" ], - "ConstraintDescription" : "must select a valid Cache Node type." - }, - - "NumberOfCacheNodes" : { - "Default": "1", - "Description" : "The number of Cache Nodes the Cache Cluster should have", - "Type": "Number", - "MinValue": "1", - "MaxValue": "10", - "ConstraintDescription" : "must be between 5 and 10." - }, - "SSHLocation" : { - "Description" : "The IP address range that can be used to SSH to the EC2 instances", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." - } - }, - - "Mappings" : { - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "PV64" }, - "m1.small" : { "Arch" : "PV64" }, - "m1.medium" : { "Arch" : "PV64" }, - "m1.large" : { "Arch" : "PV64" }, - "m1.xlarge" : { "Arch" : "PV64" }, - "m3.xlarge" : { "Arch" : "PV64" }, - "m3.2xlarge" : { "Arch" : "PV64" }, - "m2.xlarge" : { "Arch" : "PV64" }, - "m2.2xlarge" : { "Arch" : "PV64" }, - "m2.4xlarge" : { "Arch" : "PV64" }, - "c1.medium" : { "Arch" : "PV64" }, - "c1.xlarge" : { "Arch" : "PV64" }, - "cc1.4xlarge" : { "Arch" : "CLU64" }, - "cc2.8xlarge" : { "Arch" : "CLU64" }, - "cg1.4xlarge" : { "Arch" : "GPU64" }, - "hi1.4xlarge" : { "Arch" : "PV64" }, - "hs1.8xlarge" : { "Arch" : "PV64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "PV64" : "ami-1624987f", "CLU64" : "ami-08249861", "GPU64" : "ami-02f54a6b" }, - "us-west-2" : { "PV64" : "ami-2a31bf1a", "CLU64" : "ami-2431bf14", "GPU64" : "NOT_YET_SUPPORTED" }, - "us-west-1" : { "PV64" : "ami-1bf9de5e", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "eu-west-1" : { "PV64" : "ami-c37474b7", "CLU64" : "ami-d97474ad", "GPU64" : "ami-1b02026f" }, - "ap-southeast-1" : { "PV64" : "ami-a6a7e7f4", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-southeast-2" : { "PV64" : "ami-bd990e87", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-northeast-1" : { "PV64" : "ami-4e6cd34f", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "sa-east-1" : { "PV64" : "ami-1e08d103", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" } - } - }, - - "Resources" : { - - "CacheCluster" : { - "Type": "AWS::ElastiCache::CacheCluster", - "Properties": { - "CacheNodeType" : { "Ref" : "CacheNodeType" }, - "CacheSecurityGroupNames" : [ { "Ref" : "CacheSecurityGroup" } ], - "Engine" : "memcached", - "NumCacheNodes" : { "Ref" : 
"NumberOfCacheNodes" } - } - }, - - "CacheSecurityGroup": { - "Type": "AWS::ElastiCache::SecurityGroup", - "Properties": { - "Description" : "Lock cache down to Web Server access only" - } - }, - - "CacheSecurityGroupIngress": { - "Type": "AWS::ElastiCache::SecurityGroupIngress", - "Properties": { - "CacheSecurityGroupName" : { "Ref" : "CacheSecurityGroup" }, - "EC2SecurityGroupName" : { "Ref" : "WebServerSecurityGroup" } - } - }, - - "WebServerSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Enable HTTP and SSH access", - "SecurityGroupIngress" : [ - {"IpProtocol" : "tcp", "FromPort" : "22", "ToPort" : "22", "CidrIp" : { "Ref" : "SSHLocation"} }, - {"IpProtocol" : "tcp", "FromPort" : "80", "ToPort" : "80", "CidrIp" : "0.0.0.0/0"} - ] - } - }, - - "WebServerHost": { - "Type" : "AWS::EC2::Instance", - "Metadata" : { - "AWS::CloudFormation::Init" : { - "config" : { - "packages" : { - "yum" : { - "httpd" : [], - "gcc-c++" : [], - "php" : [], - "php-pear" : [] - } - }, - - "files" : { - "/var/www/html/index.php" : { - "content" : { "Fn::Join" : ["", [ - "AWS CloudFormation sample application for Amazon ElastiCache';\n", - "\n", - "$server_endpoint = '", { "Fn::GetAtt" : [ "CacheCluster", "ConfigurationEndpoint.Address" ]}, "';\n", - "$server_port = ", { "Fn::GetAtt" : [ "CacheCluster", "ConfigurationEndpoint.Port" ]}, ";\n", - "\n", - "/**\n", - " * The following will initialize a Memcached client to utilize the Auto Discovery feature.\n", - " * \n", - " * By configuring the client with the Dynamic client mode with single endpoint, the\n", - " * client will periodically use the configuration endpoint to retrieve the current cache\n", - " * cluster configuration. This allows scaling the cache cluster up or down in number of nodes\n", - " * without requiring any changes to the PHP application. \n", - " */\n", - "\n", - "$dynamic_client = new Memcached();\n", - "$dynamic_client->setOption(Memcached::OPT_CLIENT_MODE, Memcached::DYNAMIC_CLIENT_MODE);\n", - "$dynamic_client->addServer($server_endpoint, $server_port);\n", - "\n", - "$tmp_object = new stdClass;\n", - "$tmp_object->str_attr = 'test';\n", - "$tmp_object->int_attr = 123;\n", - "\n", - "$dynamic_client->set('key', $tmp_object, 10) or die ('Failed to save data to the cache');\n", - "echo '

<p>Store data in the cache (data will expire in 10 seconds)</p>

';\n", - "\n", - "$get_result = $dynamic_client->get('key');\n", - "echo '

<p>Data from the cache:</p>
';\n", - "\n", - "var_dump($get_result);\n", - "\n", - "echo '

';\n", - "?>\n" - ]]}, - "mode" : "000644", - "owner" : "apache", - "group" : "apache" - } - }, - - "commands" : { - "00_install_memcached_client" : { - "command" : "pecl install https://s3.amazonaws.com/elasticache-downloads/ClusterClient/PHP/latest-64bit" - }, - "01_enable_auto_discovery" : { - "command" : "echo 'extension=amazon-elasticache-cluster-client.so' > /etc/php.d/memcached.ini" - } - }, - - "services" : { - "sysvinit" : { - "httpd" : { "enabled" : "true", "ensureRunning" : "true" }, - "sendmail" : { "enabled" : "false", "ensureRunning" : "false" } - } - } - } - } - }, - "Properties": { - "ImageId" : { "Fn::FindInMap" : [ "AWSRegionArch2AMI", { "Ref" : "AWS::Region" }, - { "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : "InstanceType" }, "Arch" ]}]}, - "InstanceType" : { "Ref" : "InstanceType" }, - "SecurityGroups" : [ {"Ref" : "WebServerSecurityGroup"} ], - "KeyName" : { "Ref" : "KeyName" }, - "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ - "#!/bin/bash -v\n", - "yum update -y aws-cfn-bootstrap\n", - - "# Setup the PHP sample application\n", - "/opt/aws/bin/cfn-init ", - " --stack ", { "Ref" : "AWS::StackName" }, - " --resource WebServerHost ", - " --region ", { "Ref" : "AWS::Region" }, "\n", - - "# Signal the status of cfn-init\n", - "/opt/aws/bin/cfn-signal -e $? '", { "Ref" : "WebServerWaitHandle" }, "'\n" - ]]}} - } - }, - - "WebServerWaitHandle" : { - "Type" : "AWS::CloudFormation::WaitConditionHandle" - }, - - "WebServerWaitCondition" : { - "Type" : "AWS::CloudFormation::WaitCondition", - "DependsOn" : "WebServerHost", - "Properties" : { - "Handle" : {"Ref" : "WebServerWaitHandle"}, - "Timeout" : "300" - } - } - }, - "Outputs" : { - "WebsiteURL" : { - "Value" : { "Fn::Join" : ["", ["http://", { "Fn::GetAtt" : [ "WebServerHost", "PublicDnsName" ]} ]] }, - "Description" : "Application URL" - } - } -} diff --git a/cloudformation_templates/examples/RDS_MySQL_55_With_Tags.json b/cloudformation_templates/examples/RDS_MySQL_55_With_Tags.json deleted file mode 100644 index f9ab015205d..00000000000 --- a/cloudformation_templates/examples/RDS_MySQL_55_With_Tags.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "AWSTemplateFormatVersion" : "2010-09-09", - - "Description" : "AWS CloudFormation Sample Template RDS_MySQL_55_With_Tags: Sample template showing how to create an RDS DBInstance version 5.5 with tags and alarming on important metrics that indicate the health of the database **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters": { - "DBName": { - "Default": "MyDatabase", - "Description" : "The database name", - "Type": "String", - "MinLength": "1", - "MaxLength": "64", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, - "DBUser": { - "NoEcho": "true", - "Description" : "The database admin account username", - "Type": "String", - "MinLength": "1", - "MaxLength": "16", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, - "DBPassword": { - "NoEcho": "true", - "Description" : "The database admin account password", - "Type": "String", - "MinLength": "1", - "MaxLength": "41", - "AllowedPattern" : "[a-zA-Z0-9]*", - "ConstraintDescription" : "must contain only alphanumeric characters." 
- }, - "DBAllocatedStorage": { - "Default": "5", - "Description" : "The size of the database (Gb)", - "Type": "Number", - "MinValue": "5", - "MaxValue": "1024", - "ConstraintDescription" : "must be between 5 and 1024Gb." - }, - "DBInstanceClass": { - "Default": "db.m1.small", - "Description" : "The database instance type", - "Type": "String", - "AllowedValues" : [ "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge" ], - "ConstraintDescription" : "must select a valid database instance type." - } - }, - - "Mappings" : { - "InstanceTypeMap" : { - "db.m1.small" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m1.large" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m1.xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m2.xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m2.2xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m2.4xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - } - } - }, - - "Resources" : { - - "MyDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "DBName" : { "Ref" : "DBName" }, - "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Engine" : "MySQL", - "EngineVersion" : "5.5", - "MasterUsername" : { "Ref" : "DBUser" }, - "MasterUserPassword" : { "Ref" : "DBPassword" }, - "Tags" : [{ - "Key" : "Name", - "Value" : "My SQL Database" - }] - }, - "DeletionPolicy" : "Snapshot" - } - }, - - "Outputs" : { - "JDBCConnectionString": { - "Description" : "JDBC connection string for database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "MyDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "MyDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - }, - "DBAddress" : { - "Description" : "Address of database endpoint", - "Value" : { "Fn::GetAtt": [ "MyDB", "Endpoint.Address" ] } - }, - "DBPort" : { - "Description" : "Database endpoint port number", - "Value" : { "Fn::GetAtt": [ "MyDB", "Endpoint.Port" ] } - } - } -} diff --git a/doc/cfn-output-example.png b/doc/cfn-output-example.png deleted file mode 100644 index 42d1ba21e15..00000000000 Binary files a/doc/cfn-output-example.png and /dev/null differ diff --git a/docker.mk b/docker.mk new file mode 100644 index 00000000000..c77b73c9bc7 --- /dev/null +++ b/docker.mk @@ -0,0 +1,89 @@ +.PHONY: docker.build docker.pkg + +SHARD=0 +SHARDS=1 + +dockerfiles:=$(shell ls docker/build/*/Dockerfile) +all_images:=$(patsubst docker/build/%/Dockerfile,%,$(dockerfiles)) + +# Used in the test.mk file as well. +images:=$(if $(TRAVIS_COMMIT_RANGE),$(shell git diff --name-only $(TRAVIS_COMMIT_RANGE) | python util/parsefiles.py),$(all_images)) +# Only use images that actually contain a Dockerfile +images:=$(shell echo "$(all_images) $(images)" | tr " " "\n" | sort | uniq -d) + +docker_build=docker.build. +docker_pkg=docker.pkg. +docker_push=docker.push. 
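+# The prefixes above become per-image target names through the pattern rules
+# below; e.g. `make docker.build.ecommerce` runs `docker build` against
+# docker/build/ecommerce/Dockerfile, and `make docker.push.ecommerce` tags the
+# packaged image as edxops/ecommerce:latest and pushes it to dockerhub.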
+ +help: docker.help + +docker.help: + @echo ' Docker:' + @echo ' $$image: any dockerhub image' + @echo ' $$container: any container defined in docker/build/$$container/Dockerfile' + @echo '' + @echo ' $(docker_pull)$$image pull $$image from dockerhub' + @echo '' + @echo ' $(docker_build)$$container build $$container' + @echo ' $(docker_pkg)$$container package $$container for a push to dockerhub' + @echo ' $(docker_push)$$container push $$container to dockerhub ' + @echo '' + @echo ' docker.build build all defined docker containers (based on dockerhub base images)' + @echo ' docker.pkg package all defined docker containers (using local base images)' + @echo ' docker.push push all defined docker containers' + @echo '' + +# N.B. / is used as a separator so that % will match the / +# in something like 'edxops/trusty-common:latest' +# Also, make can't handle ':' in filenames, so we instead '@' +# which means the same thing to docker +docker_pull=docker.pull/ + +build: docker.build + +pkg: docker.pkg + +clean: docker.clean + +docker.clean: + rm -rf .build + +docker.build: $(foreach image,$(images),$(docker_build)$(image)) +docker.pkg: $(foreach image,$(images),$(docker_pkg)$(image)) +docker.push: $(foreach image,$(images),$(docker_push)$(image)) + +$(docker_pull)%: + docker pull $(subst @,:,$*) + +$(docker_build)%: docker/build/%/Dockerfile + docker build -f $< . + +$(docker_pkg)%: .build/%/Dockerfile.pkg + docker build -t $*:latest -f $< . + +$(docker_push)%: $(docker_pkg)% + docker tag $*:latest edxops/$*:latest + docker push edxops/$*:latest + + +.build/%/Dockerfile.d: docker/build/%/Dockerfile Makefile + @mkdir -p .build/$* + $(eval BASE_IMAGE_TAG=$(shell grep "^\s*ARG BASE_IMAGE_TAG" $< | sed -E "s/ARG BASE_IMAGE_TAG=//")) + @# I have no idea why the final sed is eating the first character of the substitution... + $(eval FROM=$(shell grep "^\s*FROM" docker/build/ecommerce/Dockerfile | sed -E "s/FROM //" | sed -E "s/:/@/g" | sed -E 's/\$\{BASE_IMAGE_TAG\}/ $(BASE_IMAGE_TAG)/')) + $(eval EDXOPS_FROM=$(shell echo "$(FROM)" | sed -E "s#edxops/([^@]+)(@.*)?#\1#")) + @echo "Base Image Tag: $(BASE_IMAGE_TAG)" + @echo $(FROM) + @echo $(EDXOPS_FROM) + @echo "$(docker_build)$*: $(docker_pull)$(FROM)" > $@ + @if [ "$(EDXOPS_FROM)" != "$(FROM)" ]; then \ + echo "$(docker_pkg)$*: $(docker_pkg)$(EDXOPS_FROM:@%=)" >> $@; \ + else \ + echo "$(docker_pkg)$*: $(docker_pull)$(FROM)" >> $@; \ + fi + +.build/%/Dockerfile.pkg: docker/build/%/Dockerfile Makefile + @mkdir -p .build/$* + @# perl p (print the line) n (loop over every line) e (exec the regex), like sed but cross platform + +-include $(foreach image,$(images),.build/$(image)/Dockerfile.d) diff --git a/docker/README.rst b/docker/README.rst new file mode 100644 index 00000000000..a68a8f81e80 --- /dev/null +++ b/docker/README.rst @@ -0,0 +1,78 @@ +Docker Support +############## + +Introduction +************ + +Docker support for edX services is volatile and experimental. We welcome +interested testers and contributors. If you are interested in participating, +please join us on Slack at https://openedx.slack.com/messages/docker. + +We do not and may never run these images in production. They are not +currently suitable for production use. + +Tooling +******* + +``Dockerfile``\ s for individual services should be placed in +``docker/build/``. There should be an accompanying +``ansible_overrides.yml`` which specifies any docker-specific configuration +values. 
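+For example, a hypothetical service named ``myservice`` (the name is purely
+illustrative) would add two files:
+
+.. code:: shell
+
+    docker/build/myservice/Dockerfile
+    docker/build/myservice/ansible_overrides.yml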
+
+Once the ``Dockerfile`` has been created, it can be built and published using a
+set of make commands.
+
+.. code:: shell
+
+    make docker.build.<service>  # Build the service container (but don't tag it)
+                                 # By convention, this will build the container using
+                                 # the currently checked-out configuration repository,
+                                 # and will build on top of the most-recently available
+                                 # base container image from dockerhub.
+
+    make docker.test.<service>   # Test that the Dockerfile for <service> will build.
+                                 # This will rebuild any edx-specific containers that
+                                 # the Dockerfile depends on as well, in case there
+                                 # are failures as a result of changes to the base image.
+
+    make docker.pkg.<service>    # Package <service> for publishing to Dockerhub. This
+                                 # will also package and tag pre-requisite service containers.
+
+    make docker.push.<service>   # Push <service> to Dockerhub as latest.
+
+Image naming
+************
+
+The latest images built from master branches are named ``edxops/<service>:latest``,
+for example, ``edxops/edxapp:latest``. Images built from Open edX release
+branches use an appropriate tag, such as ``edxops/edxapp:hawthorn.master``.
+
+Build arguments
+***************
+
+Dockerfiles make use of these build arguments:
+
+- ``OPENEDX_RELEASE`` is the release branch to use. It defaults to "master".
+  To use an Open edX release, provide the full branch name:
+
+``--build-arg OPENEDX_RELEASE=open-release/hawthorn.master``
+
+- ``BASE_IMAGE_TAG`` is the tag for the base image to build on. It
+  defaults to "latest" for master builds. For an Open edX release, use
+  the name of the release:
+
+``--build-arg BASE_IMAGE_TAG=hawthorn.master``
+
+Conventions
+***********
+
+In order to facilitate development, Dockerfiles should be based on one of the
+``edxops/<ubuntu version>-common`` base images, and should
+``COPY . /edx/app/edx_ansible/edx_ansible`` in order to load your local ansible
+plays into the image. The actual work of configuring the image should be done
+by executing ansible (rather than explicit steps in the Dockerfile), unless
+those steps are docker specific. Devstack-specific steps can be tagged with the
+``devstack:install`` tag so that they only run when building a devstack
+image.
+
+The user used in the ``Dockerfile`` should be ``root``.
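+
+Putting the pieces together, building the ecommerce image against an Open edX
+release (a sketch; any service directory under ``docker/build/`` follows the
+same pattern) would be run from the root of configuration as:
+
+.. code:: shell
+
+    docker build -f docker/build/ecommerce/Dockerfile \
+        --build-arg OPENEDX_RELEASE=open-release/hawthorn.master \
+        --build-arg BASE_IMAGE_TAG=hawthorn.master \
+        .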
diff --git a/docker/build/analytics_pipeline/Dockerfile b/docker/build/analytics_pipeline/Dockerfile new file mode 100644 index 00000000000..8cccf22c2e1 --- /dev/null +++ b/docker/build/analytics_pipeline/Dockerfile @@ -0,0 +1,156 @@ +ARG BASE_IMAGE_TAG=latest +FROM edxops/xenial-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" + +USER root +ENV BOTO_CONFIG=/dev/null \ + JDK_URL=http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz \ + JDK_DIST_FILE=jdk-8u131-linux-x64.tar.gz \ + JAVA_HOME=/usr/lib/jvm/java-8-oracle \ + HADOOP_URL=https://archive.apache.org/dist/hadoop/common/hadoop-2.7.2/hadoop-2.7.2.tar.gz \ + HADOOP_DIST_FILE=hadoop-2.7.2.tar.gz \ + HADOOP_HOME=/edx/app/hadoop/hadoop \ + HADOOP_PREFIX=/edx/app/hadoop/hadoop \ + HIVE_URL=https://archive.apache.org/dist/hive/hive-2.1.1/apache-hive-2.1.1-bin.tar.gz \ + HIVE_DIST_FILE=apache-hive-2.1.1-bin.tar.gz \ + HIVE_HOME=/edx/app/hadoop/hive \ + SQOOP_URL=http://archive.apache.org/dist/sqoop/1.4.6/sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz \ + SQOOP_DIST_FILE=sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz \ + SQOOP_MYSQL_CONNECTOR_URL=http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.29.tar.gz \ + SQOOP_MYSQL_CONNECTOR_FILE=mysql-connector-java-5.1.29 \ + SQOOP_HOME=/edx/app/hadoop/sqoop \ + SQOOP_LIB=/edx/app/hadoop/sqoop/lib \ + SQOOP_VERTICA_CONNECTOR_URL=https://vertica.com/client_drivers/9.1.x/9.1.1-0/vertica-jdbc-9.1.1-0.jar \ + SQOOP_VERTICA_CONNECTOR_FILE=vertica-jdbc-9.1.1-0.jar \ + SPARK_URL=https://archive.apache.org/dist/spark/spark-2.1.0/spark-2.1.0-bin-hadoop2.7.tgz \ + SPARK_DIST_FILE=spark-2.1.0-bin-hadoop2.7.tgz \ + SPARK_HOME=/edx/app/hadoop/spark \ + LUIGI_CONFIG_PATH=/edx/app/analytics_pipeline/analytics_pipeline/config/luigi_docker.cfg \ + ANALYTICS_PIPELINE_VENV=/edx/app/analytics_pipeline/venvs \ + BOOTSTRAP=/etc/bootstrap.sh \ + COMMON_BASE_DIR=/edx \ + COMMON_PIP_PACKAGES_PIP='pip==21.2.1' \ + COMMON_PIP_PACKAGES_SETUPTOOLS='setuptools==44.1.0' \ + COMMON_PIP_PACKAGES_VIRTUALENV='virtualenv==20.1.0' \ + COMMON_MYSQL_READ_ONLY_USER='read_only' \ + COMMON_MYSQL_READ_ONLY_PASS='password' \ + ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER='pipeline001' \ + ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD='password' \ + EDX_PPA_KEY_SERVER='keyserver.ubuntu.com' \ + EDX_PPA_KEY_ID='69464050' + + +ENV PATH="/edx/app/analytics_pipeline/venvs/analytics_pipeline/bin:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${HIVE_HOME}/bin:${SPARK_HOME}/bin:${SPARK_HOME}/sbin:${SQOOP_HOME}/bin:$PATH" \ + COMMON_DATA_DIR=$COMMON_BASE_DIR/var \ + COMMON_APP_DIR=$COMMON_BASE_DIR/app \ + COMMON_LOG_DIR=$COMMON_BASE_DIR/var/log \ + COMMON_BIN_DIR=$COMMON_BASE_DIR/bin \ + COMMON_CFG_DIR=$COMMON_BASE_DIR/etc + +# add custom PPAs & install packages +RUN apt-get update -y && apt-get install -y software-properties-common \ + && apt-key adv --keyserver $EDX_PPA_KEY_SERVER --recv-keys $EDX_PPA_KEY_ID \ + && add-apt-repository -y 'deb http://ppa.edx.org xenial main' \ + && apt-get update -y \ + && apt-get install --no-install-recommends -y \ + python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 libmysqlclient-dev libffi-dev libssl-dev \ + libatlas-base-dev libblas-dev liblapack-dev libpq-dev sudo make build-essential git-core \ + openssh-server openssh-client rsync software-properties-common vim net-tools curl netcat mysql-client-5.6 \ + apt-transport-https ntp acl lynx-cur logrotate rsyslog unzip \ + ack-grep mosh tree screen tmux dnsutils 
inetutils-telnet \
+    && rm -rf /var/lib/apt/lists/*
+
+# creating directory structure
+RUN mkdir -p $HADOOP_HOME $JAVA_HOME $ANALYTICS_PIPELINE_VENV /edx/app/hadoop/lib $HIVE_HOME /etc/luigi \
+    $SPARK_HOME $SQOOP_HOME $COMMON_DATA_DIR $COMMON_APP_DIR $COMMON_LOG_DIR $COMMON_BIN_DIR $COMMON_CFG_DIR/edx-analytics-pipeline
+
+# create user & group for hadoop
+RUN groupadd hadoop
+RUN useradd -ms /bin/bash hadoop -g hadoop -d /edx/app/hadoop
+RUN echo '%hadoop ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# JAVA
+RUN curl -fSL --header "Cookie:oraclelicense=accept-securebackup-cookie" "$JDK_URL" -o /var/tmp/$JDK_DIST_FILE \
+    && tar -xzf /var/tmp/$JDK_DIST_FILE -C $JAVA_HOME --strip-components=1 \
+    && rm -f /var/tmp/$JDK_DIST_FILE
+
+# HADOOP
+RUN curl -fSL "$HADOOP_URL" -o /var/tmp/$HADOOP_DIST_FILE \
+    && tar -xzf /var/tmp/$HADOOP_DIST_FILE -C $HADOOP_HOME --strip-components=1 \
+    && sed -i '/^export JAVA_HOME/ s:.*:export JAVA_HOME=/usr/lib/jvm/java-8-oracle\nexport HADOOP_PREFIX=/edx/app/hadoop/hadoop\nexport HADOOP_HOME=/edx/app/hadoop/hadoop\n:' $HADOOP_HOME/etc/hadoop/hadoop-env.sh \
+    && sed -i '/^export HADOOP_CONF_DIR/ s:.*:export HADOOP_CONF_DIR=/edx/app/hadoop/hadoop/etc/hadoop/:' $HADOOP_HOME/etc/hadoop/hadoop-env.sh \
+    && sed -i 's#<configuration>#<configuration><property><name>fs.defaultFS</name><value>hdfs://namenode:8020</value></property>#' $HADOOP_HOME/etc/hadoop/core-site.xml \
+    && sed 's#<configuration>#<configuration><property><name>mapreduce.framework.name</name><value>yarn</value></property>#' $HADOOP_HOME/etc/hadoop/mapred-site.xml.template > $HADOOP_HOME/etc/hadoop/mapred-site.xml \
+    && sed -i 's#<configuration>#<configuration><property><name>yarn.resourcemanager.hostname</name><value>resourcemanager</value></property>#' $HADOOP_HOME/etc/hadoop/yarn-site.xml \
+    && rm -f /var/tmp/$HADOOP_DIST_FILE
+
+# HIVE
+RUN curl -fSL "$HIVE_URL" -o /var/tmp/$HIVE_DIST_FILE \
+    && tar -xzf /var/tmp/$HIVE_DIST_FILE -C $HIVE_HOME --strip-components=1 \
+    && rm -f /var/tmp/$HIVE_DIST_FILE
+ADD docker/build/analytics_pipeline/hive-site.xml.template $HIVE_HOME/conf/hive-site.xml
+
+# SPARK
+RUN curl -fSL "$SPARK_URL" -o /var/tmp/$SPARK_DIST_FILE \
+    && tar -xzf /var/tmp/$SPARK_DIST_FILE -C $SPARK_HOME --strip-components=1 \
+    && echo 'spark.master spark://sparkmaster:7077\nspark.eventLog.enabled true\nspark.eventLog.dir hdfs://namenode:8020/tmp/spark-events\nspark.history.fs.logDirectory hdfs://namenode:8020/tmp/spark-events\nspark.sql.warehouse.dir hdfs://namenode:8020/spark-warehouse' > $SPARK_HOME/conf/spark-defaults.conf \
+    && rm -f /var/tmp/$SPARK_DIST_FILE
+
+# SQOOP
+RUN curl -fSL "$SQOOP_URL" -o /var/tmp/$SQOOP_DIST_FILE \
+    && curl -fSL "$SQOOP_MYSQL_CONNECTOR_URL" -o /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE.tar.gz \
+    && curl -fSL "$SQOOP_VERTICA_CONNECTOR_URL" -o /var/tmp/$SQOOP_VERTICA_CONNECTOR_FILE \
+    && tar -xzf /var/tmp/$SQOOP_DIST_FILE -C $SQOOP_HOME --strip-components=1 \
+    && tar -xzf /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE.tar.gz -C /var/tmp/ \
+    && cp /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE/$SQOOP_MYSQL_CONNECTOR_FILE-bin.jar $SQOOP_LIB \
+    && cp /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE/$SQOOP_MYSQL_CONNECTOR_FILE-bin.jar $HIVE_HOME/lib/ \
+    && cp /var/tmp/$SQOOP_VERTICA_CONNECTOR_FILE $SQOOP_LIB \
+    && rm -rf /var/tmp/$SQOOP_DIST_FILE /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE* /var/tmp/$SQOOP_VERTICA_CONNECTOR_FILE*
+
+WORKDIR /var/tmp
+# Edx Hadoop Util Library
+RUN git clone https://github.com/edx/edx-analytics-hadoop-util \
+    && cd /var/tmp/edx-analytics-hadoop-util \
+    && $JAVA_HOME/bin/javac -cp `/edx/app/hadoop/hadoop/bin/hadoop classpath` org/edx/hadoop/input/ManifestTextInputFormat.java \
+    && $JAVA_HOME/bin/jar cf /edx/app/hadoop/lib/edx-analytics-hadoop-util.jar
org/edx/hadoop/input/ManifestTextInputFormat.class + +# configure bootstrap scripts for container +ADD docker/build/analytics_pipeline/bootstrap.sh /etc/bootstrap.sh +RUN chown hadoop:hadoop /etc/bootstrap.sh \ + && chmod 700 /etc/bootstrap.sh \ + && chown -R hadoop:hadoop /edx/app/hadoop + +# Analytics pipeline +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN git clone https://github.com/openedx/edx-analytics-pipeline \ + && cd edx-analytics-pipeline \ + && git checkout ${OPENEDX_RELEASE} \ + && cd .. \ + && cp /var/tmp/edx-analytics-pipeline/Makefile /var/tmp/Makefile \ + && cp -r /var/tmp/edx-analytics-pipeline/requirements /var/tmp/requirements \ + && rm -rf /var/tmp/edx-analytics-pipeline + +RUN pip install $COMMON_PIP_PACKAGES_PIP $COMMON_PIP_PACKAGES_SETUPTOOLS $COMMON_PIP_PACKAGES_VIRTUALENV \ + && virtualenv $ANALYTICS_PIPELINE_VENV/analytics_pipeline/ \ + && chown -R hadoop:hadoop $ANALYTICS_PIPELINE_VENV/analytics_pipeline/ \ + && echo '[hadoop]\nversion: cdh4\ncommand: /edx/app/hadoop/hadoop/bin/hadoop\nstreaming-jar: /edx/app/hadoop/hadoop/share/hadoop/tools/lib/hadoop-streaming-2.7.2.jar' > /etc/luigi/client.cfg + +RUN apt-get update && make system-requirements +ADD docker/build/analytics_pipeline/devstack.sh /edx/app/analytics_pipeline/devstack.sh +RUN chown hadoop:hadoop /edx/app/analytics_pipeline/devstack.sh && chmod a+x /edx/app/analytics_pipeline/devstack.sh +USER hadoop +RUN touch /edx/app/hadoop/.bashrc \ + && echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle\nexport HADOOP_HOME=/edx/app/hadoop/hadoop\nexport HIVE_HOME=/edx/app/hadoop/hive\nexport SQOOP_HOME=/edx/app/hadoop/sqoop\nexport SPARK_HOME=/edx/app/hadoop/spark\nexport PATH="/edx/app/analytics_pipeline/venvs/analytics_pipeline/bin:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${HIVE_HOME}/bin:${SPARK_HOME}/bin:${SPARK_HOME}/sbin:${SQOOP_HOME}/bin:$PATH"' > /edx/app/hadoop/.bashrc \ + && . 
$ANALYTICS_PIPELINE_VENV/analytics_pipeline/bin/activate \ + && make test-requirements requirements + +RUN sudo chown hadoop:hadoop $COMMON_CFG_DIR/edx-analytics-pipeline/ \ + && echo "{\"username\": \"$COMMON_MYSQL_READ_ONLY_USER\", \"host\": \"mysql\", \"password\": \"$COMMON_MYSQL_READ_ONLY_PASS\", \"port\": 3306}" > $COMMON_CFG_DIR/edx-analytics-pipeline/input.json \ + && echo "{\"username\": \"$ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER\", \"host\": \"mysql\", \"password\": \"$ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD\", \"port\": 3306}" > $COMMON_CFG_DIR/edx-analytics-pipeline/output.json \ + && echo "{\"username\": \"dbadmin\", \"host\": \"vertica\", \"password\": \"\", \"port\": 5433}" > $COMMON_CFG_DIR/edx-analytics-pipeline/warehouse.json + +ADD docker/build/analytics_pipeline/acceptance.json $COMMON_CFG_DIR/edx-analytics-pipeline/acceptance.json +WORKDIR /edx/app/analytics_pipeline/analytics_pipeline + +CMD ["/etc/bootstrap.sh", "-d"] diff --git a/docker/build/analytics_pipeline/acceptance.json b/docker/build/analytics_pipeline/acceptance.json new file mode 100644 index 00000000000..55b6a116682 --- /dev/null +++ b/docker/build/analytics_pipeline/acceptance.json @@ -0,0 +1,18 @@ +{ + "connection_user": "hadoop", + "credentials_file_url": "/edx/etc/edx-analytics-pipeline/output.json", + "exporter_output_bucket": "", + "geolocation_data": "hdfs://namenode:8020/edx-analytics-pipeline/geo.dat", + "hive_user": "hadoop", + "host": "analyticspipeline", + "identifier": "local-devstack", + "manifest_input_format": "org.edx.hadoop.input.ManifestTextInputFormat", + "oddjob_jar": "hdfs://namenode:8020/edx-analytics-pipeline/packages/edx-analytics-hadoop-util.jar", + "tasks_branch": "origin/HEAD", + "tasks_log_path": "/tmp/acceptance/", + "tasks_repo": "/edx/app/analytics_pipeline/analytics_pipeline", + "tasks_output_url": "hdfs://namenode:8020/tmp/acceptance-test-output/", + "vertica_creds_url": "/edx/etc/edx-analytics-pipeline/warehouse.json", + "elasticsearch_host": "http://elasticsearch:9200/", + "is_remote": "false" +} diff --git a/docker/build/analytics_pipeline/bootstrap.sh b/docker/build/analytics_pipeline/bootstrap.sh new file mode 100644 index 00000000000..ee8ee1b0582 --- /dev/null +++ b/docker/build/analytics_pipeline/bootstrap.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +: ${HADOOP_HOME:=/edx/app/hadoop/hadoop} + +bash $HADOOP_HOME/etc/hadoop/hadoop-env.sh +. /edx/app/analytics_pipeline/venvs/analytics_pipeline/bin/activate && make develop-local + +# installing libraries if any - (resource urls added comma separated to the ACP system variable) +cd $HADOOP_HOME/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + +if [[ $1 == "-d" ]]; then + while true; do sleep 30; done +fi + +if [[ $1 == "-bash" ]]; then + /bin/bash +fi diff --git a/docker/build/analytics_pipeline/devstack.sh b/docker/build/analytics_pipeline/devstack.sh new file mode 100644 index 00000000000..348eb1c47e6 --- /dev/null +++ b/docker/build/analytics_pipeline/devstack.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . /edx/app/analytics_pipeline/venvs/analytics_pipeline/bin/activate + cd /edx/app/analytics_pipeline/analytics_pipeline + + /bin/bash + ;; + exec) + shift + + . 
/edx/app/analytics_pipeline/venvs/analytics_pipeline/bin/activate
+        cd /edx/app/analytics_pipeline/analytics_pipeline
+
+        "$@"
+        ;;
+    *)
+        "$@"
+        ;;
+esac
diff --git a/docker/build/analytics_pipeline/hive-site.xml.template b/docker/build/analytics_pipeline/hive-site.xml.template
new file mode 100644
index 00000000000..b897989cc56
--- /dev/null
+++ b/docker/build/analytics_pipeline/hive-site.xml.template
@@ -0,0 +1,31 @@
+<configuration>
+    <property>
+        <name>javax.jdo.option.ConnectionURL</name>
+        <value>jdbc:mysql://mysql/edx_hive_metastore</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionDriverName</name>
+        <value>com.mysql.jdbc.Driver</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionUserName</name>
+        <value>edx_hive</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionPassword</name>
+        <value>edx</value>
+    </property>
+    <property>
+        <name>hive.metastore.warehouse.dir</name>
+        <value>hdfs://namenode:8020/edx-analytics-pipeline/warehouse/</value>
+        <description>location of default database for the warehouse</description>
+    </property>
+    <property>
+        <name>datanucleus.autoCreateSchema</name>
+        <value>false</value>
+    </property>
+    <property>
+        <name>hive.metastore.schema.verification</name>
+        <value>true</value>
+    </property>
+</configuration>
diff --git a/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile b/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile
new file mode 100644
index 00000000000..b14f85bcc8a
--- /dev/null
+++ b/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile
@@ -0,0 +1,34 @@
+FROM uhopper/hadoop:2.7.2
+LABEL maintainer="edxops"
+
+RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list
+
+RUN \
+echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \
+echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \
+echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \
+echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \
+echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \
+echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list
+
+ENV HDFS_CONF_dfs_datanode_data_dir=file:///hadoop/dfs/data \
+    MYSQL_VERSION=5.6 \
+    DEBIAN_FRONTEND=noninteractive
+WORKDIR /tmp
+
+RUN apt-get -y update
+RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2
+RUN ( apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \
+    || apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \
+    || apt-key adv --keyserver hkps://hkps.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 )
+RUN echo "deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_VERSION}" > /etc/apt/sources.list.d/mysql.list
+RUN apt-get -y update \
+    && apt-get install -y mysql-community-client \
+    && apt-get install -y --no-install-recommends python python-setuptools \
+    && rm -rf /var/lib/apt/lists/*
+WORKDIR /
+RUN mkdir -p /hadoop/dfs/data
+VOLUME /hadoop/dfs/data
+ADD docker/build/analytics_pipeline_hadoop_datanode/datanode.sh /run.sh
+RUN chmod a+x /run.sh
+CMD ["/run.sh"]
diff --git a/docker/build/analytics_pipeline_hadoop_datanode/datanode.sh b/docker/build/analytics_pipeline_hadoop_datanode/datanode.sh
new file mode 100755
index 00000000000..9f57ee25483
--- /dev/null
+++ b/docker/build/analytics_pipeline_hadoop_datanode/datanode.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+datadir=`echo $HDFS_CONF_dfs_datanode_data_dir | perl -pe 's#file://##'`
+if [ !
-d $datadir ]; then + echo "Datanode data directory not found: $datadir" + exit 2 +fi + +$HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR datanode diff --git a/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile b/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile new file mode 100644 index 00000000000..80143432384 --- /dev/null +++ b/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile @@ -0,0 +1,33 @@ +FROM uhopper/hadoop:2.7.2 +LABEL maintainer="edxops" + +RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list + +ENV HDFS_CONF_dfs_namenode_name_dir=file:///hadoop/dfs/name \ + MYSQL_VERSION=5.6 \ + DEBIAN_FRONTEND=noninteractive +WORKDIR /tmp +RUN apt-get -y update +RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 +RUN ( apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ + || apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ + || apt-key adv --keyserver hkps://hkps.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) +RUN echo "deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_VERSION}" > /etc/apt/sources.list.d/mysql.list +RUN apt-get -y update \ + && apt-get install -y mysql-community-client \ + && apt-get install -y --no-install-recommends python python-setuptools \ + && rm -rf /var/lib/apt/lists/* +WORKDIR / +RUN mkdir -p /hadoop/dfs/name +VOLUME /hadoop/dfs/name +ADD docker/build/analytics_pipeline_hadoop_namenode/namenode.sh /run.sh +RUN chmod a+x /run.sh +CMD ["/run.sh"] diff --git a/docker/build/analytics_pipeline_hadoop_namenode/namenode.sh b/docker/build/analytics_pipeline_hadoop_namenode/namenode.sh new file mode 100755 index 00000000000..8ef07970258 --- /dev/null +++ b/docker/build/analytics_pipeline_hadoop_namenode/namenode.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +namedir=`echo $HDFS_CONF_dfs_namenode_name_dir | perl -pe 's#file://##'` +if [ ! 
-d $namedir ]; then + echo "Namenode name directory not found: $namedir" + exit 2 +fi + +if [ -z "$CLUSTER_NAME" ]; then + echo "Cluster name not specified" + exit 2 +fi + +if [ "`ls -A $namedir`" == "" ]; then + echo "Formatting namenode name directory: $namedir" + $HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR namenode -format $CLUSTER_NAME +fi + +$HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR namenode diff --git a/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile b/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile new file mode 100644 index 00000000000..00130c896f4 --- /dev/null +++ b/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile @@ -0,0 +1,30 @@ +FROM uhopper/hadoop:2.7.2 +LABEL maintainer="edxops" + +RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list + +ENV MYSQL_VERSION=5.6 DEBIAN_FRONTEND=noninteractive +WORKDIR /tmp + +RUN apt-get -y update +RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 +RUN ( apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ + || apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ + || apt-key adv --keyserver hkps://hkps.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) +RUN echo "deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_VERSION}" > /etc/apt/sources.list.d/mysql.list +RUN apt-get -y update \ + && apt-get install -y mysql-community-client \ + && apt-get install -y --no-install-recommends python python-setuptools \ + && rm -rf /var/lib/apt/lists/* +WORKDIR / +ADD docker/build/analytics_pipeline_hadoop_nodemanager/nodemanager.sh /run.sh +RUN chmod a+x /run.sh +CMD ["/run.sh"] diff --git a/docker/build/analytics_pipeline_hadoop_nodemanager/nodemanager.sh b/docker/build/analytics_pipeline_hadoop_nodemanager/nodemanager.sh new file mode 100644 index 00000000000..115bcdb1d7f --- /dev/null +++ b/docker/build/analytics_pipeline_hadoop_nodemanager/nodemanager.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +$HADOOP_PREFIX/bin/yarn --config $HADOOP_CONF_DIR nodemanager diff --git a/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile b/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile new file mode 100644 index 00000000000..9e91460e583 --- /dev/null +++ b/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile @@ -0,0 +1,30 @@ +FROM uhopper/hadoop:2.7.2 +LABEL maintainer="edxops" + +RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> 
/etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list + +ENV MYSQL_VERSION=5.6 DEBIAN_FRONTEND=noninteractive +WORKDIR /tmp + +RUN apt-get -y update +RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 +RUN ( apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ + || apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ + || apt-key adv --keyserver hkps://hkps.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) +RUN echo "deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_VERSION}" > /etc/apt/sources.list.d/mysql.list +RUN apt-get -y update \ + && apt-get install -y mysql-community-client \ + && apt-get install -y --no-install-recommends python python-setuptools \ + && rm -rf /var/lib/apt/lists/* +WORKDIR / +ADD docker/build/analytics_pipeline_hadoop_resourcemanager/resourcemanager.sh /run.sh +RUN chmod a+x /run.sh +CMD ["/run.sh"] diff --git a/docker/build/analytics_pipeline_hadoop_resourcemanager/resourcemanager.sh b/docker/build/analytics_pipeline_hadoop_resourcemanager/resourcemanager.sh new file mode 100644 index 00000000000..c1bdb94cd61 --- /dev/null +++ b/docker/build/analytics_pipeline_hadoop_resourcemanager/resourcemanager.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +$HADOOP_PREFIX/bin/yarn --config $HADOOP_CONF_DIR resourcemanager diff --git a/docker/build/analytics_pipeline_spark_master/Dockerfile b/docker/build/analytics_pipeline_spark_master/Dockerfile new file mode 100644 index 00000000000..4271462ede0 --- /dev/null +++ b/docker/build/analytics_pipeline_spark_master/Dockerfile @@ -0,0 +1,42 @@ +FROM bde2020/spark-base:2.1.0-hadoop2.7 +LABEL maintainer="edxops" + +RUN echo "deb [check-valid-until=no] http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list +RUN sed -i '/deb http:\/\/deb.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list + +ADD docker/build/analytics_pipeline_spark_master/master.sh / +ENV SPARK_MASTER_PORT=7077 \ + SPARK_MASTER_WEBUI_PORT=8080 \ + SPARK_MASTER_LOG=/spark/logs \ + HADOOP_USER_NAME=hadoop \ + SPARK_HOME=/spark \ + PATH=$PATH:/spark/bin \ + CORE_CONF_fs_defaultFS=hdfs://namenode:8020 \ + CORE_CONF_hadoop_http_staticuser_user=root \ + CORE_CONF_hadoop_proxyuser_hue_hosts=* \ + CORE_CONF_hadoop_proxyuser_hue_groups=* \ + HDFS_CONF_dfs_webhdfs_enabled=true \ + HDFS_CONF_dfs_permissions_enabled=false \ + YARN_CONF_yarn_log___aggregation___enable=true \ + YARN_CONF_yarn_resourcemanager_recovery_enabled=true \ + YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore \ + YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate \ + YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs \ + YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/ \ + YARN_CONF_yarn_timeline___service_enabled=true \ + YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true \ + YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true \ + YARN_CONF_yarn_resourcemanager_hostname=resourcemanager \ + 
YARN_CONF_yarn_timeline___service_hostname=historyserver \ + YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032 \ + YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030 \ + YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031 + +RUN apt-get -o Acquire::Check-Valid-Until=false update && apt-get -y install --reinstall python-pkg-resources \ + && echo 'spark.master spark://sparkmaster:7077\nspark.eventLog.enabled true\nspark.eventLog.dir hdfs://namenode:8020/tmp/spark-events\nspark.history.fs.logDirectory hdfs://namenode:8020/tmp/spark-events' > /spark/conf/spark-defaults.conf + +CMD ["/bin/bash", "/master.sh"] +# 18080: spark history server port +# 4040: spark UI port +# 6066: spark api port +EXPOSE 8080 7077 6066 18080 4040 diff --git a/docker/build/analytics_pipeline_spark_master/master.sh b/docker/build/analytics_pipeline_spark_master/master.sh new file mode 100644 index 00000000000..5042e6323a8 --- /dev/null +++ b/docker/build/analytics_pipeline_spark_master/master.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +export SPARK_MASTER_HOST=`hostname` + +. "/spark/sbin/spark-config.sh" + +. "/spark/bin/load-spark-env.sh" + +mkdir -p $SPARK_MASTER_LOG + +setsid /spark/sbin/start-history-server.sh >/dev/null 2>&1 < /dev/null & + +cd /spark/bin && /spark/sbin/../bin/spark-class org.apache.spark.deploy.master.Master \ + --ip $SPARK_MASTER_HOST --port $SPARK_MASTER_PORT --webui-port $SPARK_MASTER_WEBUI_PORT >> $SPARK_MASTER_LOG/spark-master.out diff --git a/docker/build/analytics_pipeline_spark_worker/Dockerfile b/docker/build/analytics_pipeline_spark_worker/Dockerfile new file mode 100644 index 00000000000..5eb4b1ae85a --- /dev/null +++ b/docker/build/analytics_pipeline_spark_worker/Dockerfile @@ -0,0 +1,38 @@ +FROM bde2020/spark-base:2.1.0-hadoop2.7 +LABEL maintainer="edxops" + +RUN echo "deb [check-valid-until=no] http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list +RUN sed -i '/deb http:\/\/deb.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list + +ADD docker/build/analytics_pipeline_spark_worker/worker.sh / +ENV SPARK_WORKER_WEBUI_PORT=8081 \ + SPARK_WORKER_LOG=/spark/logs \ + SPARK_MASTER="spark://sparkmaster:7077" \ + SPARK_HOME=/spark \ + CORE_CONF_fs_defaultFS=hdfs://namenode:8020 \ + CORE_CONF_hadoop_http_staticuser_user=root \ + CORE_CONF_hadoop_proxyuser_hue_hosts=* \ + CORE_CONF_hadoop_proxyuser_hue_groups=* \ + HDFS_CONF_dfs_webhdfs_enabled=true \ + HDFS_CONF_dfs_permissions_enabled=false \ + YARN_CONF_yarn_log___aggregation___enable=true \ + YARN_CONF_yarn_resourcemanager_recovery_enabled=true \ + YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore \ + YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate \ + YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs \ + YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/ \ + YARN_CONF_yarn_timeline___service_enabled=true \ + YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true \ + YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true \ + YARN_CONF_yarn_resourcemanager_hostname=resourcemanager \ + YARN_CONF_yarn_timeline___service_hostname=historyserver \ + YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032 \ + YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030 \ + YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031 + +RUN ( 
apt-key adv --keyserver keyserver.ubuntu.com --recv-key 04EE7237B7D453EC \ + || apt-key adv --keyserver keyserver.ubuntu.com --recv-key 648ACFD622F3D138) + +RUN apt-get -o Acquire::Check-Valid-Until=false update && apt-get -y install --reinstall python-pkg-resources +CMD ["/bin/bash", "/worker.sh"] +EXPOSE 8081 diff --git a/docker/build/analytics_pipeline_spark_worker/worker.sh b/docker/build/analytics_pipeline_spark_worker/worker.sh new file mode 100644 index 00000000000..aefffb34cf3 --- /dev/null +++ b/docker/build/analytics_pipeline_spark_worker/worker.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +. "/spark/sbin/spark-config.sh" + +. "/spark/bin/load-spark-env.sh" + +mkdir -p $SPARK_WORKER_LOG + +/spark/sbin/../bin/spark-class org.apache.spark.deploy.worker.Worker \ + --webui-port $SPARK_WORKER_WEBUI_PORT $SPARK_MASTER >> $SPARK_WORKER_LOG/spark-worker.out diff --git a/docker/build/automated/Dockerfile b/docker/build/automated/Dockerfile new file mode 100644 index 00000000000..cf164d9fd87 --- /dev/null +++ b/docker/build/automated/Dockerfile @@ -0,0 +1,10 @@ +FROM edxops/xenial-common:latest +LABEL maintainer="edxops" + +ADD . /edx/app/edx_ansible/edx_ansible +COPY docker/build/automated/ansible_overrides.yml / +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook -vvvv automated.yml \ + -i '127.0.0.1,' -c local \ + -e@/ansible_overrides.yml +WORKDIR /edx/app diff --git a/docker/build/automated/ansible_overrides.yml b/docker/build/automated/ansible_overrides.yml new file mode 100644 index 00000000000..676e737240e --- /dev/null +++ b/docker/build/automated/ansible_overrides.yml @@ -0,0 +1,12 @@ +--- +FLOCK_TLD: "edx" + +AUTOMATED_AUTHORIZED_KEYS: ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCx+OpJ6787GWnSo5FcwNPjiM7yqjXKi0FPkfpx8Dd3Oqts5PnJV/xokMP4vJTfXu6Zezh+/NvofgMlnxhnIwC3YIoGkLRhW5vKTZfohPjhyIRu0TyQOgmdocYk2o7xMQ1/fcrQh1sQMQqz79mv1ENKc47dVv7qfdBz803M5gschi4RjMYNMN97AA5rByz/AHJnrxQMWEndOZU+H2X9KRUn1TsWe8s99alILwFrNF1dZzF20r2zMErx48f4zfaczQnLOm+pJ1VrruPI8tQzS9X/kfy8GpBTbTX7X80SuM1Npuazr5sJAalXSZ2ccBVa1fKRwa6PmET21gdxOd2ZUsFYL5wZsPIF2f2ij5XwQxKco2lHH6QsvBzapY1BI5PZ/+mQzoaDO7w6WaaDvSDVxyuG/Sw0kOpA9uVEp3qTs8WT6CUYFmnBd+E8YnH6OwqbS9gfBkSNY9pwq1EpR+DCXVFuzWfYoSGQjxpTFbe7YsShB2Jyf/rZyA7NaS4lEkF8eABG6siEwckWvMOV5Z0lGGLTia1DCOZ3c6X09Te3xY4weYS1c0/Nx15C0rmYsMUeDYDonJWujBbvlOBNpx2opG2KPkSE9PAKWyS/mc4SrW0urJBxjAommVq9//dPTxo7IBmiCNWEcOuhXsQYp5tpDmj32Dh8nvNrkvOFYxb9SxuZgQ== automated@example.com'] + +AUTOMATED_USERS: + testing: + sudo_commands: + - command: "/usr/bin/ls" + sudo_user: "ubuntu" + job_name: "ls" + authorized_keys: "{{ AUTOMATED_AUTHORIZED_KEYS }}" diff --git a/docker/build/bionic-common/Dockerfile b/docker/build/bionic-common/Dockerfile new file mode 100644 index 00000000000..611f22fe54c --- /dev/null +++ b/docker/build/bionic-common/Dockerfile @@ -0,0 +1,21 @@ +FROM ubuntu:bionic + +# Set locale to UTF-8 which is not the default for docker. 
+# See the links for details: +# http://jaredmarkell.com/docker-and-locales/ +# https://github.com/docker-library/python/issues/13 +# https://github.com/docker-library/python/pull/14/files +RUN apt-get update &&\ + apt-get install -y locales &&\ + locale-gen en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +ENV CONFIGURATION_REPO="https://github.com/openedx/configuration.git" +ARG OPENEDX_RELEASE=master +ENV CONFIGURATION_VERSION="${OPENEDX_RELEASE}" + +ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh +RUN chmod +x /tmp/ansible-bootstrap.sh +RUN /tmp/ansible-bootstrap.sh diff --git a/docker/build/chrome/Dockerfile b/docker/build/chrome/Dockerfile new file mode 100644 index 00000000000..d4811e942ba --- /dev/null +++ b/docker/build/chrome/Dockerfile @@ -0,0 +1,19 @@ +FROM selenium/standalone-chrome-debug:3.14.0-arsenic +LABEL maintainer="edxops" + +USER root + +# Install a password generator +RUN apt-get update -qqy \ + && apt-get -qqy install \ + pwgen \ + && rm -rf /var/lib/apt/lists/* /var/cache/apt/* + +USER seluser + +CMD export VNC_PASSWORD=$(pwgen -s -1 $(shuf -i 10-20 -n 1)) \ + && x11vnc -storepasswd $VNC_PASSWORD /home/seluser/.vnc/passwd \ + && echo "Chrome VNC password: $VNC_PASSWORD" \ + && /opt/bin/entry_point.sh + +EXPOSE 4444 5900 diff --git a/docker/build/credentials/Dockerfile b/docker/build/credentials/Dockerfile new file mode 100644 index 00000000000..a0068dfbb60 --- /dev/null +++ b/docker/build/credentials/Dockerfile @@ -0,0 +1,33 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/credentials/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root +ENTRYPOINT ["/edx/app/credentials/devstack.sh"] +CMD ["start"] + +ADD . /edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/credentials/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/credentials/credentials.yml /edx/etc/credentials.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook credentials.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="CREDENTIALS_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18150 diff --git a/docker/build/credentials/ansible_overrides.yml b/docker/build/credentials/ansible_overrides.yml new file mode 100644 index 00000000000..f2fb497d52e --- /dev/null +++ b/docker/build/credentials/ansible_overrides.yml @@ -0,0 +1,18 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ CREDENTIALS_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ CREDENTIALS_MYSQL_PASSWORD }}' + +CREDENTIALS_MYSQL_HOST: 'edx.devstack.mysql' +CREDENTIALS_DJANGO_SETTINGS_MODULE: 'credentials.settings.devstack' +CREDENTIALS_GUNICORN_EXTRA: '--reload' +CREDENTIALS_MEMCACHE: ['edx.devstack.memcached:11211'] +CREDENTIALS_EXTRA_APPS: ['credentials.apps.edx_credentials_extensions'] +CREDENTIALS_URL_ROOT: 'http://localhost:18150/' + +edx_django_service_is_devstack: true + +# NOTE: The creation of demo data requires database access, +# which we don't have when making new images. 
+credentials_create_demo_data: false diff --git a/docker/build/credentials/credentials.yml b/docker/build/credentials/credentials.yml new file mode 100644 index 00000000000..4cd04b6c997 --- /dev/null +++ b/docker/build/credentials/credentials.yml @@ -0,0 +1,61 @@ +--- + +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: credentials-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: credentials-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: credentials + LOCATION: + - edx.devstack.memcached:11211 +CERTIFICATE_LANGUAGES: + en: English + es_419: Spanish +CREDENTIALS_SERVICE_USER: credentials_service_user +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: false + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: credentials + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: credentials001 +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +EXTRA_APPS: +- credentials.apps.edx_credentials_extensions +FILE_STORAGE_BACKEND: {} +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/credentials/media + MEDIA_URL: /media/ +SECRET_KEY: SET-ME-TO-A-UNIQUE-LONG-RANDOM-STRING +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: credentials-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: credentials-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: django.contrib.staticfiles.storage.ManifestStaticFilesStorage +STATIC_ROOT: /edx/var/credentials/staticfiles +TIME_ZONE: UTC +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME diff --git a/docker/build/designer/Dockerfile b/docker/build/designer/Dockerfile new file mode 100644 index 00000000000..3ecd24c5244 --- /dev/null +++ b/docker/build/designer/Dockerfile @@ -0,0 +1,32 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/designer/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +MAINTAINER edxops +USER root +ENTRYPOINT ["/edx/app/designer/devstack.sh"] +CMD ["start"] + +ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/designer/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook designer.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="DESIGNER_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18808 diff --git a/docker/build/designer/ansible_overrides.yml b/docker/build/designer/ansible_overrides.yml new file mode 100644 index 00000000000..f1b30d9eab3 --- /dev/null +++ b/docker/build/designer/ansible_overrides.yml @@ -0,0 +1,14 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ DESIGNER_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ DESIGNER_MYSQL_PASSWORD }}' + +DESIGNER_MYSQL_HOST: 'edx.devstack.mysql' +DESIGNER_DJANGO_SETTINGS_MODULE: 'designer.settings.devstack' +DESIGNER_GUNICORN_EXTRA: '--reload' +DESIGNER_MEMCACHE: ['edx.devstack.memcached:11211'] +DESIGNER_EXTRA_APPS: [] +DESIGNER_URL_ROOT: 'http://designer:18808/' + +edx_django_service_is_devstack: true diff --git a/docker/build/docker-tools/Dockerfile b/docker/build/docker-tools/Dockerfile new file mode 100644 index 00000000000..0b18391a747 --- /dev/null +++ b/docker/build/docker-tools/Dockerfile @@ -0,0 +1,24 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/docker-tools/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +FROM edxops/xenial-common:latest +LABEL maintainer="edxops" + +ENV REPO_OWNER=edx + +ADD . /edx/app/edx_ansible/edx_ansible + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/docker-tools/ansible_overrides.yml / +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook docker-tools.yml \ + -c local -i '127.0.0.1,' \ + -t 'install' +RUN which docker +RUN which docker-compose diff --git a/docker/build/docker-tools/ansible_overrides.yml b/docker/build/docker-tools/ansible_overrides.yml new file mode 100644 index 00000000000..e54b63a6af1 --- /dev/null +++ b/docker/build/docker-tools/ansible_overrides.yml @@ -0,0 +1,2 @@ +--- +DOCKER_TLD: "edx" diff --git a/docker/build/ecommerce/Dockerfile b/docker/build/ecommerce/Dockerfile new file mode 100644 index 00000000000..6662f49fc1a --- /dev/null +++ b/docker/build/ecommerce/Dockerfile @@ -0,0 +1,33 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/ecommerce/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root +ENTRYPOINT ["/edx/app/ecommerce/devstack.sh"] +CMD ["start"] + +ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/ecommerce/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/ecommerce/ecommerce.yml /edx/etc/ecommerce.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook ecommerce.yml \ + -c local -i '127.0.0.1,' \ + -t 'install,assets,devstack' \ + --extra-vars="ECOMMERCE_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18130 diff --git a/docker/build/ecommerce/ansible_overrides.yml b/docker/build/ecommerce/ansible_overrides.yml new file mode 100644 index 00000000000..4c5f8c7c915 --- /dev/null +++ b/docker/build/ecommerce/ansible_overrides.yml @@ -0,0 +1,31 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ ECOMMERCE_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ ECOMMERCE_MYSQL_PASSWORD }}' + +# NOTE: Theming requires downloading a theme from a separate Git repo. This repo (edx/edx-themes) is private for +# edX.org. In order to build an image with these themes, you must update COMMON_GIT_IDENTITY to an SSH key with access +# to the private repo. Otherwise, the sample-themes repository, which has no ecommerce themes, will be downloaded if +# comprehensive theming is enabled. +ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING: false +#THEMES_GIT_IDENTITY: "{{ COMMON_GIT_IDENTITY }}" +#THEMES_GIT_PROTOCOL: "ssh" +#THEMES_GIT_MIRROR: "github.com" +#THEMES_GIT_PATH: "edx" +#THEMES_REPO: "edx-themes.git" + +ECOMMERCE_DATABASE_HOST: 'edx.devstack.mysql57' +ECOMMERCE_DJANGO_SETTINGS_MODULE: 'ecommerce.settings.devstack' +ECOMMERCE_GUNICORN_EXTRA: '--reload' +ECOMMERCE_MEMCACHE: ['edx.devstack.memcached:11211'] +ECOMMERCE_ECOMMERCE_URL_ROOT: 'http://localhost:18130/' +ECOMMERCE_LMS_URL_ROOT: 'http://edx.devstack.lms:18000/' +ECOMMERCE_DISCOVERY_SERVICE_URL: 'http://edx.devstack.discovery:18381/' +ECOMMERCE_USE_PYTHON38: true + +edx_django_service_is_devstack: true + +# NOTE: The creation of demo data requires database access, +# which we don't have when making new images. 
+ecommerce_create_demo_data: false diff --git a/docker/build/ecommerce/ecommerce.yml b/docker/build/ecommerce/ecommerce.yml new file mode 100644 index 00000000000..84ab0d84f41 --- /dev/null +++ b/docker/build/ecommerce/ecommerce.yml @@ -0,0 +1,130 @@ +--- + +AFFILIATE_COOKIE_KEY: dev_affiliate_id +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: ecommerce-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: ecommerce-backend-service-secret +ECOMMERCE_WORKER_BROKER_HOST: 172.17.0.2 +BROKER_URL: amqp://celery:celery@172.17.0.2:5672 +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: ecommerce + LOCATION: + - edx.devstack.memcached:11211 +COMPREHENSIVE_THEME_DIRS: +- /edx/var/edx-themes/edx-themes/ecommerce +- /edx/app/ecommerce/ecommerce/ecommerce/themes +CORS_ALLOW_CREDENTIALS: false +CORS_ORIGIN_WHITELIST: [] +CORS_URLS_REGEX: '' +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: ecommerce + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: ecomm001 +DEFAULT_SITE_THEME: null +ECOMMERCE_URL_ROOT: http://localhost:18130 +EDX_API_KEY: PUT_YOUR_API_KEY_HERE +EDX_DRF_EXTENSIONS: + JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES: + - tracking_context + JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING: + administrator: is_staff + email: email + full_name: full_name + tracking_context: tracking_context + user_id: lms_user_id + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +ENABLE_COMPREHENSIVE_THEMING: false +ENROLLMENT_FULFILLMENT_TIMEOUT: 7 +ENTERPRISE_SERVICE_URL: http://edx.devstack.lms:18000/enterprise/ +ENTERPRISE_LEARNER_PORTAL_HOSTNAME: localhost:8734 +EXTRA_APPS: [] +JWT_AUTH: + JWT_ALGORITHM: HS256 + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + - AUDIENCE: lms-key + ISSUER: ecommerce_worker + SECRET_KEY: lms-secret + JWT_LEEWAY: 1 + JWT_PUBLIC_SIGNING_JWK_SET: '' + JWT_SECRET_KEY: lms-secret + JWT_VERIFY_EXPIRATION: true +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +LOGGING_ROOT_OVERRIDES: {} +LOGGING_SUBSECTION_OVERRIDES: {} +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/ecommerce/media + MEDIA_URL: /media/ +OSCAR_FROM_EMAIL: oscar@example.com +PAYMENT_MICROFRONTEND_URL: null +PAYMENT_PROCESSOR_CONFIG: + edx: + cybersource: + access_key: SET-ME-PLEASE + apple_pay_country_code: US + apple_pay_merchant_id_certificate_path: /edx/etc/ssl/apple_pay_merchant.pem + apple_pay_merchant_id_domain_association: 'This value should also be in + private configuration. It, too, + + will span multiple lines. 
+ + ' + apple_pay_merchant_identifier: merchant.com.example + cancel_page_url: /checkout/cancel-checkout/ + merchant_id: SET-ME-PLEASE + payment_page_url: https://testsecureacceptance.cybersource.com/pay + profile_id: SET-ME-PLEASE + receipt_page_url: /checkout/receipt/ + secret_key: SET-ME-PLEASE + send_level_2_3_details: true + soap_api_url: https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.140.wsdl + sop_access_key: SET-ME-PLEASE + sop_payment_page_url: https://testsecureacceptance.cybersource.com/silent/pay + sop_profile_id: SET-ME-PLEASE + sop_secret_key: SET-ME-PLEASE + transaction_key: SET-ME-PLEASE + paypal: + cancel_url: /checkout/cancel-checkout/ + client_id: SET-ME-PLEASE + client_secret: SET-ME-PLEASE + error_url: /checkout/error/ + mode: sandbox + receipt_url: /checkout/receipt/ +PLATFORM_NAME: Your Platform Name Here +SAILTHRU_KEY: sailthru key here +SAILTHRU_SECRET: sailthru secret here +SECRET_KEY: Your secret key here +SESSION_COOKIE_SECURE: true +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: ecommerce-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: ecommerce-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: ecommerce.theming.storage.ThemeStorage +STATIC_ROOT: /edx/var/ecommerce/staticfiles +THEME_SCSS: sass/themes/default.scss +TIME_ZONE: UTC +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME +SDN_CHECK_API_URL: https://data.trade.gov/consolidated_screening_list/v1/search +SDN_CHECK_API_KEY: sdn search key here diff --git a/docker/build/ecomworker/Dockerfile b/docker/build/ecomworker/Dockerfile new file mode 100644 index 00000000000..22464865dc7 --- /dev/null +++ b/docker/build/ecomworker/Dockerfile @@ -0,0 +1,31 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/ecomworker/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" + +ADD . 
/edx/app/edx_ansible/edx_ansible + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/ecomworker/ansible_overrides.yml / +COPY docker/build/ecomworker/ecomworker.yml /edx/etc/ecomworker.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook ecomworker.yml \ + -c local -i '127.0.0.1,' \ + -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" \ + --extra-vars="ECOMMERCE_WORKER_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" + +USER root +ENTRYPOINT ["/edx/app/ecomworker/devstack.sh"] +CMD ["start"] diff --git a/docker/build/ecomworker/ansible_overrides.yml b/docker/build/ecomworker/ansible_overrides.yml new file mode 100644 index 00000000000..9ab2431b912 --- /dev/null +++ b/docker/build/ecomworker/ansible_overrides.yml @@ -0,0 +1,4 @@ +--- + +DOCKER_TLD: "edx" +devstack: true diff --git a/docker/build/ecomworker/ecomworker.yml b/docker/build/ecomworker/ecomworker.yml new file mode 100644 index 00000000000..49c40e8a34d --- /dev/null +++ b/docker/build/ecomworker/ecomworker.yml @@ -0,0 +1,21 @@ +--- + +BROKER_URL: amqp://celery:celery@172.17.0.2:5672 +ECOMMERCE_API_ROOT: http://127.0.0.1:8002/api/v2/ +JWT_ISSUER: ecommerce_worker +JWT_SECRET_KEY: insecure-secret-key +MAX_FULFILLMENT_RETRIES: 11 +SAILTHRU: + SAILTHRU_ABANDONED_CART_DELAY: 60 + SAILTHRU_ABANDONED_CART_TEMPLATE: null + SAILTHRU_CACHE_TTL_SECONDS: 3600 + SAILTHRU_ENABLE: false + SAILTHRU_ENROLL_TEMPLATE: null + SAILTHRU_KEY: sailthru key here + SAILTHRU_MINIMUM_COST: 100 + SAILTHRU_PURCHASE_TEMPLATE: null + SAILTHRU_RETRY_ATTEMPTS: 6 + SAILTHRU_RETRY_SECONDS: 3600 + SAILTHRU_SECRET: sailthru secret here + SAILTHRU_UPGRADE_TEMPLATE: null +SITE_OVERRIDES: null diff --git a/docker/build/ecomworker/inventory b/docker/build/ecomworker/inventory new file mode 100644 index 00000000000..8bb7ba6b33a --- /dev/null +++ b/docker/build/ecomworker/inventory @@ -0,0 +1,2 @@ +[local] +localhost diff --git a/docker/build/edxapp/Dockerfile b/docker/build/edxapp/Dockerfile new file mode 100644 index 00000000000..b3942d5863a --- /dev/null +++ b/docker/build/edxapp/Dockerfile @@ -0,0 +1,37 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/edxapp/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root +ENTRYPOINT ["/edx/app/edxapp/devstack.sh"] +CMD ["start"] + +ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/edxapp/ansible_overrides.yml / +COPY docker/build/edxapp/devstack.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/edxapp/studio.yml /edx/etc/studio.yml +COPY docker/build/edxapp/lms.yml /edx/etc/lms.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook edxapp.yml \ + -c local -i '127.0.0.1,' \ + -t 'install,assets,devstack' \ + --extra-vars="EDX_PLATFORM_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" \ + && rm -rf /edx/app/edxapp/edx-platform + +EXPOSE 18000 18010 diff --git a/docker/build/edxapp/ansible_overrides.yml b/docker/build/edxapp/ansible_overrides.yml new file mode 100644 index 00000000000..d39e164b9c4 --- /dev/null +++ b/docker/build/edxapp/ansible_overrides.yml @@ -0,0 +1,41 @@ +--- +EDXAPP_SETTINGS: 'devstack_docker' +EDXAPP_MEMCACHE: ['edx.devstack.memcached:11211'] +EDXAPP_MYSQL_HOST: "edx.devstack.mysql57" +EDXAPP_MONGO_HOSTS: + - "edx.devstack.mongo" + +devstack: true +migrate_db: false +mongo_enable_journal: false +edxapp_npm_production: "no" + +EDXAPP_LMS_GUNICORN_EXTRA_CONF: 'reload = True' + +EDXAPP_NO_PREREQ_INSTALL: 0 +COMMON_SSH_PASSWORD_AUTH: "yes" +EDXAPP_CMS_BASE: "edx.devstack.studio:18010" +EDXAPP_OAUTH_ENFORCE_SECURE: false +EDXAPP_LMS_BASE_SCHEME: http +COMMON_SECURITY_UPDATES: true +SECURITY_UPGRADE_ON_ANSIBLE: true + +EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false + +EDXAPP_SEARCH_HOST: 'edx.devstack.elasticsearch' + +EDXAPP_EDXAPP_SECRET_KEY: 'DUMMY KEY ONLY FOR TO DEVSTACK' + +edxapp_debian_pkgs_extra: + - mongodb-clients + - mysql-client + +edxapp_environment_extra: + SELENIUM_BROWSER: 'firefox' + SELENIUM_HOST: 'edx.devstack.firefox' + SELENIUM_PORT: '4444' + +EDXAPP_XQUEUE_URL: 'http://edx.devstack.xqueue:18040/' + +EDXAPP_ENABLE_EDXNOTES: true +EDXAPP_EDXNOTES_INTERNAL_API: 'http://edx.devstack.edx_notes_api:18120/api/v1' diff --git a/docker/build/edxapp/devstack.yml b/docker/build/edxapp/devstack.yml new file mode 120000 index 00000000000..85924141566 --- /dev/null +++ b/docker/build/edxapp/devstack.yml @@ -0,0 +1 @@ +../../../playbooks/roles/edxapp/vars/devstack.yml \ No newline at end of file diff --git a/docker/build/edxapp/lms.yml b/docker/build/edxapp/lms.yml new file mode 100644 index 00000000000..402e7fd296a --- /dev/null +++ b/docker/build/edxapp/lms.yml @@ -0,0 +1,577 @@ +ACCOUNT_MICROFRONTEND_URL: null +ACE_CHANNEL_DEFAULT_EMAIL: django_email +ACE_CHANNEL_SAILTHRU_API_KEY: '' +ACE_CHANNEL_SAILTHRU_API_SECRET: '' +ACE_CHANNEL_SAILTHRU_DEBUG: true +ACE_CHANNEL_SAILTHRU_TEMPLATE_NAME: null +ACE_CHANNEL_TRANSACTIONAL_EMAIL: django_email +ACE_ENABLED_CHANNELS: +- django_email +ACE_ENABLED_POLICIES: +- bulk_email_optout +ACE_ROUTING_KEY: edx.lms.core.default +ACTIVATION_EMAIL_SUPPORT_LINK: '' +AFFILIATE_COOKIE_NAME: dev_affiliate_id +ALTERNATE_WORKER_QUEUES: cms +ANALYTICS_API_KEY: '' +ANALYTICS_API_URL: http://localhost:18100 +ANALYTICS_DASHBOARD_NAME: Your Platform Name Here Insights +ANALYTICS_DASHBOARD_URL: http://localhost:18110/courses +API_ACCESS_FROM_EMAIL: api-requests@example.com +API_ACCESS_MANAGER_EMAIL: api-access@example.com +API_DOCUMENTATION_URL: http://course-catalog-api-guide.readthedocs.io/en/latest/ +AUTH_DOCUMENTATION_URL: 
http://course-catalog-api-guide.readthedocs.io/en/latest/authentication/index.html +AUTH_PASSWORD_VALIDATORS: +- NAME: django.contrib.auth.password_validation.UserAttributeSimilarityValidator +- NAME: common.djangoapps.util.password_policy_validators.MinimumLengthValidator + OPTIONS: + min_length: 2 +- NAME: common.djangoapps.util.password_policy_validators.MaximumLengthValidator + OPTIONS: + max_length: 75 +AWS_ACCESS_KEY_ID: null +AWS_QUERYSTRING_AUTH: false +AWS_S3_CUSTOM_DOMAIN: SET-ME-PLEASE (ex. bucket-name.s3.amazonaws.com) +AWS_SECRET_ACCESS_KEY: null +AWS_SES_REGION_ENDPOINT: email.us-east-1.amazonaws.com +AWS_SES_REGION_NAME: us-east-1 +AWS_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. bucket-name) +BASE_COOKIE_DOMAIN: localhost +BLOCKSTORE_API_URL: http://localhost:18250/api/v1 +BLOCKSTORE_PUBLIC_URL_ROOT: http://localhost:18250 +BLOCK_STRUCTURES_SETTINGS: + COURSE_PUBLISH_TASK_DELAY: 30 + PRUNING_ACTIVE: false + TASK_DEFAULT_RETRY_DELAY: 30 + TASK_MAX_RETRIES: 5 +BRANCH_IO_KEY: '' +BUGS_EMAIL: bugs@example.com +BULK_EMAIL_DEFAULT_FROM_EMAIL: no-reply@example.com +BULK_EMAIL_EMAILS_PER_TASK: 500 +BULK_EMAIL_LOG_SENT_EMAILS: false +BULK_EMAIL_ROUTING_KEY_SMALL_JOBS: edx.lms.core.default +CACHES: + celery: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: celery + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + configuration: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4 + LOCATION: + - edx.devstack.memcached:11211 + course_structure_cache: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: course_structure + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: default + LOCATION: + - edx.devstack.memcached:11211 + VERSION: '1' + general: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: general + LOCATION: + - edx.devstack.memcached:11211 + mongo_metadata_inheritance: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: mongo_metadata_inheritance + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: 300 + staticfiles: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4_general + LOCATION: + - edx.devstack.memcached:11211 +CAS_ATTRIBUTE_CALLBACK: '' +CAS_EXTRA_LOGIN_PARAMS: '' +CAS_SERVER_URL: '' +CELERY_BROKER_HOSTNAME: localhost +CELERY_BROKER_PASSWORD: celery +CELERY_BROKER_TRANSPORT: amqp +CELERY_BROKER_USER: celery +CELERY_BROKER_USE_SSL: false +CELERY_BROKER_VHOST: '' +CELERY_EVENT_QUEUE_TTL: null +CELERY_TIMEZONE: UTC +CELERY_RESULT_BACKEND: django-cache +CERTIFICATE_TEMPLATE_LANGUAGES: + en: English + es: Español +CERT_QUEUE: certificates +CMS_BASE: edx.devstack.studio:18010 +CODE_JAIL: + limits: + CPU: 1 + FSIZE: 1048576 + PROXY: 0 + REALTIME: 3 + VMEM: 536870912 + python_bin: '' + user: sandbox +COMMENTS_SERVICE_KEY: password +COMMENTS_SERVICE_URL: http://localhost:18080 +COMPREHENSIVE_THEME_DIRS: +- '' +COMPREHENSIVE_THEME_LOCALE_PATHS: [] +CONTACT_EMAIL: info@example.com 
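+# COMPREHENSIVE_THEME_DIRS above contains only an empty entry, so no theme
+# directories are actually configured; a filled-in sketch (the edx-platform
+# path is an assumption, by analogy with the ecommerce themes path used in
+# this change):
+# COMPREHENSIVE_THEME_DIRS:
+# - /edx/var/edx-themes/edx-themes/edx-platform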
+CONTACT_MAILING_ADDRESS: SET-ME-PLEASE +CONTENTSTORE: + ADDITIONAL_OPTIONS: {} + DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: SECONDARY_PREFERRED + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.contentstore.mongo.MongoContentStore + OPTIONS: + auth_source: null + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + ssl: false + user: edxapp +CORS_ORIGIN_ALLOW_ALL: false +CORS_ORIGIN_WHITELIST: [] +COURSES_WITH_UNSAFE_CODE: [] +COURSE_ABOUT_VISIBILITY_PERMISSION: see_exists +COURSE_CATALOG_API_URL: http://localhost:8008/api/v1 +COURSE_CATALOG_VISIBILITY_PERMISSION: see_exists +CREDENTIALS_INTERNAL_SERVICE_URL: http://localhost:8005 +CREDENTIALS_PUBLIC_SERVICE_URL: http://localhost:8005 +CREDIT_HELP_LINK_URL: '' +CREDIT_PROVIDER_SECRET_KEYS: {} +CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: '' +CROSS_DOMAIN_CSRF_COOKIE_NAME: '' +CSRF_COOKIE_SECURE: false +CSRF_TRUSTED_ORIGINS: [] +DATABASES: + default: + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql57 + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + read_replica: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql57 + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + student_module_history: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql57 + NAME: edxapp_csmh + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 +DATA_DIR: /edx/var/edxapp +DEFAULT_COURSE_VISIBILITY_IN_CATALOG: both +DEFAULT_FEEDBACK_EMAIL: feedback@example.com +DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage +DEFAULT_FROM_EMAIL: registration@example.com +DEFAULT_JWT_ISSUER: + AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret +DEFAULT_MOBILE_AVAILABLE: false +DEFAULT_SITE_THEME: '' +DEPRECATED_ADVANCED_COMPONENT_TYPES: [] +DJFS: + directory_root: /edx/var/edxapp/django-pyfs/static/django-pyfs + type: osfs + url_root: /static/django-pyfs +DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: SECONDARY_PREFERRED + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp +ECOMMERCE_API_SIGNING_KEY: lms-secret +ECOMMERCE_API_URL: http://localhost:8002/api/v2 +ECOMMERCE_PUBLIC_URL_ROOT: http://localhost:8002 +EDXMKTG_USER_INFO_COOKIE_NAME: edx-user-info +EDXNOTES_INTERNAL_API: http://edx.devstack.edx_notes_api:18120/api/v1 +EDXNOTES_PUBLIC_API: http://localhost:18120/api/v1 +EDX_API_KEY: PUT_YOUR_API_KEY_HERE +EDX_PLATFORM_REVISION: master +ELASTIC_SEARCH_CONFIG: +- host: edx.devstack.elasticsearch710 + port: 9200 + use_ssl: false +EMAIL_BACKEND: django.core.mail.backends.smtp.EmailBackend +EMAIL_HOST: localhost +EMAIL_HOST_PASSWORD: '' +EMAIL_HOST_USER: '' +EMAIL_PORT: 25 +EMAIL_USE_TLS: false +ENABLE_COMPREHENSIVE_THEMING: false +ENTERPRISE_API_URL: http://edx.devstack.lms:18000/enterprise/api/v1 +ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES: +- audit +- honor +ENTERPRISE_CUSTOMER_SUCCESS_EMAIL: customersuccess@edx.org +ENTERPRISE_ENROLLMENT_API_URL: http://edx.devstack.lms:18000/api/enrollment/v1/ +ENTERPRISE_INTEGRATIONS_EMAIL: enterprise-integrations@edx.org +ENTERPRISE_LEARNER_PORTAL_HOSTNAME: 
localhost:8734 +ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: {} +ENTERPRISE_SERVICE_WORKER_USERNAME: enterprise_worker +ENTERPRISE_SUPPORT_URL: '' +ENTERPRISE_TAGLINE: '' +EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: [] +EXTRA_MIDDLEWARE_CLASSES: [] +FACEBOOK_API_VERSION: v2.1 +FACEBOOK_APP_ID: FACEBOOK_APP_ID +FACEBOOK_APP_SECRET: FACEBOOK_APP_SECRET +FEATURES: + AUTOMATIC_AUTH_FOR_TESTING: false + CUSTOM_COURSES_EDX: false + ENABLE_COMBINED_LOGIN_REGISTRATION: true + ENABLE_CORS_HEADERS: false + ENABLE_COUNTRY_ACCESS: false + ENABLE_CREDIT_API: false + ENABLE_CREDIT_ELIGIBILITY: false + ENABLE_CROSS_DOMAIN_CSRF_COOKIE: false + ENABLE_CSMH_EXTENDED: true + ENABLE_DISCUSSION_HOME_PANEL: true + ENABLE_DISCUSSION_SERVICE: true + ENABLE_EDXNOTES: true + ENABLE_ENROLLMENT_RESET: false + ENABLE_GRADE_DOWNLOADS: true + ENABLE_MKTG_SITE: false + ENABLE_MOBILE_REST_API: false + ENABLE_OAUTH2_PROVIDER: false + ENABLE_PUBLISHER: false + ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: true + ENABLE_SPECIAL_EXAMS: false + ENABLE_SYSADMIN_DASHBOARD: false + ENABLE_THIRD_PARTY_AUTH: true + ENABLE_VIDEO_UPLOAD_PIPELINE: false + PREVIEW_LMS_BASE: preview.localhost:18000 + SHOW_FOOTER_LANGUAGE_SELECTOR: false + SHOW_HEADER_LANGUAGE_SELECTOR: false +FEEDBACK_SUBMISSION_EMAIL: '' +FERNET_KEYS: +- DUMMY KEY CHANGE BEFORE GOING TO PRODUCTION +FILE_UPLOAD_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. bucket-name) +FILE_UPLOAD_STORAGE_PREFIX: submissions_attachments +FINANCIAL_REPORTS: + BUCKET: null + ROOT_PATH: sandbox + STORAGE_TYPE: localfs +FOOTER_ORGANIZATION_IMAGE: images/logo.png +GITHUB_REPO_ROOT: /edx/var/edxapp/data +GIT_REPO_DIR: /edx/var/edxapp/course_repos +GOOGLE_ANALYTICS_ACCOUNT: null +GOOGLE_ANALYTICS_LINKEDIN: '' +GOOGLE_ANALYTICS_TRACKING_ID: '' +GOOGLE_SITE_VERIFICATION_ID: '' +GRADES_DOWNLOAD: + BUCKET: '' + ROOT_PATH: '' + STORAGE_CLASS: django.core.files.storage.FileSystemStorage + STORAGE_KWARGS: + location: /tmp/edx-s3/grades + STORAGE_TYPE: '' +HELP_TOKENS_BOOKS: + course_author: http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course + learner: http://edx.readthedocs.io/projects/open-edx-learner-guide +ICP_LICENSE: null +ICP_LICENSE_INFO: {} +IDA_LOGOUT_URI_LIST: [] +ID_VERIFICATION_SUPPORT_LINK: '' +JWT_AUTH: + JWT_AUDIENCE: lms-key + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUER: http://localhost:18000/oauth2 + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PRIVATE_SIGNING_JWK: None + JWT_PUBLIC_SIGNING_JWK_SET: '' + JWT_SECRET_KEY: lms-secret + JWT_SIGNING_ALGORITHM: null +JWT_EXPIRATION: 30 +JWT_ISSUER: http://localhost:18000/oauth2 +JWT_PRIVATE_SIGNING_KEY: null +LANGUAGE_CODE: en +LANGUAGE_COOKIE: openedx-language-preference +LEARNER_PORTAL_URL_ROOT: https://learner-portal-edx.devstack.lms:18000 +LMS_BASE: edx.devstack.lms:18000 +LMS_INTERNAL_ROOT_URL: http://edx.devstack.lms:18000 +LMS_ROOT_URL: http://edx.devstack.lms:18000 +LOCAL_LOGLEVEL: INFO +LOGGING_ENV: sandbox +LOGIN_REDIRECT_WHITELIST: [] +LOG_DIR: /edx/var/log/edx +LTI_AGGREGATE_SCORE_PASSBACK_DELAY: 900 +LTI_USER_EMAIL_DOMAIN: lti.example.com +MAILCHIMP_NEW_USER_LIST_ID: null +MAINTENANCE_BANNER_TEXT: Sample banner message +MEDIA_ROOT: /edx/var/edxapp/media/ +MEDIA_URL: /media/ +MKTG_URLS: {} +MKTG_URL_LINK_MAP: {} +MOBILE_STORE_URLS: {} +MODULESTORE: + default: + ENGINE: xmodule.modulestore.mixed.MixedModuleStore + OPTIONS: + mappings: {} + stores: + - DOC_STORE_CONFIG: + 
auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: SECONDARY_PREFERRED + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore + NAME: split + OPTIONS: + default_class: xmodule.hidden_block.HiddenBlock + fs_root: /edx/var/edxapp/data + render_template: common.djangoapps.edxmako.shortcuts.render_to_string + - DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.mongo.DraftMongoModuleStore + NAME: draft + OPTIONS: + default_class: xmodule.hidden_block.HiddenBlock + fs_root: /edx/var/edxapp/data + render_template: common.djangoapps.edxmako.shortcuts.render_to_string +OAUTH_DELETE_EXPIRED: true +OAUTH_ENFORCE_SECURE: false +OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS: 365 +OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS: 30 +OPTIMIZELY_PROJECT_ID: null +ORA2_FILE_PREFIX: default_env-default_deployment/ora2 +ORDER_HISTORY_MICROFRONTEND_URL: null +PAID_COURSE_REGISTRATION_CURRENCY: +- usd +- $ +PARENTAL_CONSENT_AGE_LIMIT: 13 +PARTNER_SUPPORT_EMAIL: '' +PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: + ENFORCE_COMPLIANCE_ON_LOGIN: false +PASSWORD_RESET_SUPPORT_LINK: '' +PAYMENT_SUPPORT_EMAIL: billing@example.com +PDF_RECEIPT_BILLING_ADDRESS: 'Enter your receipt billing + + address here. + + ' +PDF_RECEIPT_COBRAND_LOGO_PATH: '' +PDF_RECEIPT_DISCLAIMER_TEXT: 'ENTER YOUR RECEIPT DISCLAIMER TEXT HERE. + + ' +PDF_RECEIPT_FOOTER_TEXT: 'Enter your receipt footer text here. + + ' +PDF_RECEIPT_LOGO_PATH: '' +PDF_RECEIPT_TAX_ID: 00-0000000 +PDF_RECEIPT_TAX_ID_LABEL: fake Tax ID +PDF_RECEIPT_TERMS_AND_CONDITIONS: 'Enter your receipt terms and conditions here. 
+ + ' +PLATFORM_DESCRIPTION: Your Platform Description Here +PLATFORM_FACEBOOK_ACCOUNT: http://www.facebook.com/YourPlatformFacebookAccount +PLATFORM_NAME: Your Platform Name Here +PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount' +POLICY_CHANGE_GRADES_ROUTING_KEY: edx.lms.core.default +SINGLE_LEARNER_COURSE_REGRADE_ROUTING_KEY: edx.lms.core.default +PRESS_EMAIL: press@example.com +PROCTORING_BACKENDS: + DEFAULT: 'null' + 'null': {} +PROCTORING_SETTINGS: {} +PROFILE_IMAGE_BACKEND: + class: openedx.core.storage.OverwriteStorage + options: + base_url: /media/profile-images/ + location: /edx/var/edxapp/media/profile-images/ +PROFILE_IMAGE_MAX_BYTES: 1048576 +PROFILE_IMAGE_MIN_BYTES: 100 +PROFILE_IMAGE_SECRET_KEY: placeholder_secret_key +PROFILE_IMAGE_SIZES_MAP: + full: 500 + large: 120 + medium: 50 + small: 30 +PROFILE_MICROFRONTEND_URL: null +PROGRAM_CERTIFICATES_ROUTING_KEY: edx.lms.core.default +PROGRAM_CONSOLE_MICROFRONTEND_URL: null +RECALCULATE_GRADES_ROUTING_KEY: edx.lms.core.default +REGISTRATION_EXTRA_FIELDS: + city: hidden + confirm_email: hidden + country: required + gender: optional + goals: optional + honor_code: required + level_of_education: optional + mailing_address: hidden + terms_of_service: hidden + year_of_birth: optional +RETIRED_EMAIL_DOMAIN: retired.invalid +RETIRED_EMAIL_PREFIX: retired__user_ +RETIRED_USERNAME_PREFIX: retired__user_ +RETIRED_USER_SALTS: +- OVERRIDE ME WITH A RANDOM VALUE +- ROTATE SALTS BY APPENDING NEW VALUES +RETIREMENT_SERVICE_WORKER_USERNAME: retirement_worker +RETIREMENT_STATES: +- PENDING +- ERRORED +- ABORTED +- COMPLETE +SECRET_KEY: DUMMY KEY ONLY FOR TO DEVSTACK +SEGMENT_KEY: null +SERVER_EMAIL: sre@example.com +SESSION_COOKIE_DOMAIN: '' +SESSION_COOKIE_NAME: sessionid +SESSION_COOKIE_SECURE: false +SESSION_SAVE_EVERY_REQUEST: false +SITE_NAME: localhost +SOCIAL_AUTH_OAUTH_SECRETS: '' +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '' +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT: {} +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: '' +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT: {} +SOCIAL_MEDIA_FOOTER_URLS: {} +SOCIAL_SHARING_SETTINGS: + CERTIFICATE_FACEBOOK: false + CERTIFICATE_TWITTER: false + CUSTOM_COURSE_URLS: false + DASHBOARD_FACEBOOK: false + DASHBOARD_TWITTER: false +STATIC_ROOT_BASE: /edx/var/edxapp/staticfiles +STATIC_URL_BASE: /static/ +STUDIO_NAME: Studio +STUDIO_SHORT_NAME: Studio +SUPPORT_SITE_LINK: '' +SWIFT_AUTH_URL: null +SWIFT_AUTH_VERSION: null +SWIFT_KEY: null +SWIFT_REGION_NAME: null +SWIFT_TEMP_URL_DURATION: 1800 +SWIFT_TEMP_URL_KEY: null +SWIFT_TENANT_ID: null +SWIFT_TENANT_NAME: null +SWIFT_USERNAME: null +SWIFT_USE_TEMP_URLS: false +SYSLOG_SERVER: '' +SYSTEM_WIDE_ROLE_CLASSES: [] +TECH_SUPPORT_EMAIL: technical@example.com +TIME_ZONE: America/New_York +TRACKING_SEGMENTIO_WEBHOOK_SECRET: '' +UNIVERSITY_EMAIL: university@example.com +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME +VERIFY_STUDENT: + DAYS_GOOD_FOR: 365 + EXPIRING_SOON_WINDOW: 28 +VIDEO_CDN_URL: + EXAMPLE_COUNTRY_CODE: http://example.com/edx/video?s3_url= +VIDEO_IMAGE_MAX_AGE: 31536000 +VIDEO_IMAGE_SETTINGS: + DIRECTORY_PREFIX: video-images/ + STORAGE_KWARGS: + base_url: /media/ + location: /edx/var/edxapp/media/ + VIDEO_IMAGE_MAX_BYTES: 2097152 + VIDEO_IMAGE_MIN_BYTES: 2048 +VIDEO_TRANSCRIPTS_MAX_AGE: 31536000 +VIDEO_TRANSCRIPTS_SETTINGS: + DIRECTORY_PREFIX: video-transcripts/ + STORAGE_KWARGS: + base_url: /media/ + location: /edx/var/edxapp/media/ + VIDEO_TRANSCRIPTS_MAX_BYTES: 3145728 +VIDEO_UPLOAD_PIPELINE: + BUCKET: '' + ROOT_PATH: '' +WIKI_ENABLED: true 
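+# VIDEO_UPLOAD_PIPELINE above ships disabled (empty BUCKET and ROOT_PATH,
+# matching ENABLE_VIDEO_UPLOAD_PIPELINE: false under FEATURES); a hypothetical
+# filled-in version, with purely illustrative names, might read:
+# VIDEO_UPLOAD_PIPELINE:
+#   BUCKET: example-video-upload-bucket
+#   ROOT_PATH: videos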
+WRITABLE_GRADEBOOK_URL: null +XBLOCK_FS_STORAGE_BUCKET: null +XBLOCK_FS_STORAGE_PREFIX: null +XBLOCK_SETTINGS: {} +XQUEUE_INTERFACE: + basic_auth: + - edx + - edx + django_auth: + password: password + username: lms + url: http://edx.devstack.xqueue:18040 +X_FRAME_OPTIONS: DENY +YOUTUBE_API_KEY: PUT_YOUR_API_KEY_HERE +ZENDESK_API_KEY: '' +ZENDESK_CUSTOM_FIELDS: {} +ZENDESK_GROUP_ID_MAPPING: {} +ZENDESK_OAUTH_ACCESS_TOKEN: '' +ZENDESK_URL: '' +ZENDESK_USER: '' + diff --git a/docker/build/edxapp/studio.yml b/docker/build/edxapp/studio.yml new file mode 100644 index 00000000000..be56083c979 --- /dev/null +++ b/docker/build/edxapp/studio.yml @@ -0,0 +1,487 @@ +ACTIVATION_EMAIL_SUPPORT_LINK: '' +AFFILIATE_COOKIE_NAME: dev_affiliate_id +ALTERNATE_WORKER_QUEUES: lms +ANALYTICS_DASHBOARD_NAME: Your Platform Name Here Insights +ANALYTICS_DASHBOARD_URL: http://localhost:18110/courses +AUTH_PASSWORD_VALIDATORS: +- NAME: django.contrib.auth.password_validation.UserAttributeSimilarityValidator +- NAME: common.djangoapps.util.password_policy_validators.MinimumLengthValidator + OPTIONS: + min_length: 2 +- NAME: common.djangoapps.util.password_policy_validators.MaximumLengthValidator + OPTIONS: + max_length: 75 +AWS_ACCESS_KEY_ID: null +AWS_QUERYSTRING_AUTH: false +AWS_S3_CUSTOM_DOMAIN: SET-ME-PLEASE (ex. bucket-name.s3.amazonaws.com) +AWS_SECRET_ACCESS_KEY: null +AWS_SES_REGION_ENDPOINT: email.us-east-1.amazonaws.com +AWS_SES_REGION_NAME: us-east-1 +AWS_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. bucket-name) +BASE_COOKIE_DOMAIN: localhost +BLOCKSTORE_API_URL: http://localhost:18250/api/v1 +BLOCKSTORE_PUBLIC_URL_ROOT: http://localhost:18250 +BLOCK_STRUCTURES_SETTINGS: + COURSE_PUBLISH_TASK_DELAY: 30 + PRUNING_ACTIVE: false + TASK_DEFAULT_RETRY_DELAY: 30 + TASK_MAX_RETRIES: 5 +BRANCH_IO_KEY: '' +BUGS_EMAIL: bugs@example.com +BULK_EMAIL_DEFAULT_FROM_EMAIL: no-reply@example.com +BULK_EMAIL_EMAILS_PER_TASK: 500 +BULK_EMAIL_LOG_SENT_EMAILS: false +CACHES: + celery: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: celery + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + configuration: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4 + LOCATION: + - edx.devstack.memcached:11211 + course_structure_cache: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: course_structure + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: default + LOCATION: + - edx.devstack.memcached:11211 + VERSION: '1' + general: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: general + LOCATION: + - edx.devstack.memcached:11211 + mongo_metadata_inheritance: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: mongo_metadata_inheritance + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: 300 + staticfiles: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: common.djangoapps.util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4_general + LOCATION: + - edx.devstack.memcached:11211 +CAS_ATTRIBUTE_CALLBACK: '' 
+CAS_EXTRA_LOGIN_PARAMS: '' +CAS_SERVER_URL: '' +CELERY_BROKER_HOSTNAME: localhost +CELERY_BROKER_PASSWORD: celery +CELERY_BROKER_TRANSPORT: amqp +CELERY_BROKER_USER: celery +CELERY_BROKER_USE_SSL: false +CELERY_BROKER_VHOST: '' +CELERY_EVENT_QUEUE_TTL: null +CELERY_TIMEZONE: UTC +CELERY_RESULT_BACKEND: django-cache +CERTIFICATE_TEMPLATE_LANGUAGES: + en: English + es: Español +CERT_QUEUE: certificates +CMS_BASE: edx.devstack.studio:18010 +CODE_JAIL: + limits: + CPU: 1 + FSIZE: 1048576 + PROXY: 0 + REALTIME: 3 + VMEM: 536870912 + python_bin: '' + user: sandbox +COMMENTS_SERVICE_KEY: password +COMMENTS_SERVICE_URL: http://localhost:18080 +COMPREHENSIVE_THEME_DIRS: +- '' +COMPREHENSIVE_THEME_LOCALE_PATHS: [] +CONTACT_EMAIL: info@example.com +CONTENTSTORE: + ADDITIONAL_OPTIONS: {} + DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.contentstore.mongo.MongoContentStore + OPTIONS: + auth_source: null + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + ssl: false + user: edxapp +CORS_ORIGIN_ALLOW_ALL: false +CORS_ORIGIN_WHITELIST: [] +COURSES_WITH_UNSAFE_CODE: [] +COURSE_ABOUT_VISIBILITY_PERMISSION: see_exists +COURSE_CATALOG_API_URL: http://localhost:8008/api/v1 +COURSE_CATALOG_VISIBILITY_PERMISSION: see_exists +COURSE_IMPORT_EXPORT_BUCKET: '' +CREDENTIALS_INTERNAL_SERVICE_URL: http://localhost:8005 +CREDENTIALS_PUBLIC_SERVICE_URL: http://localhost:8005 +CREDIT_PROVIDER_SECRET_KEYS: {} +CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: '' +CROSS_DOMAIN_CSRF_COOKIE_NAME: '' +CSRF_COOKIE_SECURE: false +CSRF_TRUSTED_ORIGINS: [] +DATABASES: + default: + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql57 + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + read_replica: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql57 + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + student_module_history: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql57 + NAME: edxapp_csmh + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 +DATA_DIR: /edx/var/edxapp +DEFAULT_COURSE_VISIBILITY_IN_CATALOG: both +DEFAULT_FEEDBACK_EMAIL: feedback@example.com +DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage +DEFAULT_FROM_EMAIL: registration@example.com +DEFAULT_JWT_ISSUER: + AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret +DEFAULT_MOBILE_AVAILABLE: false +DEFAULT_SITE_THEME: '' +DEPRECATED_ADVANCED_COMPONENT_TYPES: [] +DJFS: + directory_root: /edx/var/edxapp/django-pyfs/static/django-pyfs + type: osfs + url_root: /static/django-pyfs +DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp +ECOMMERCE_API_SIGNING_KEY: lms-secret +ECOMMERCE_API_URL: http://localhost:8002/api/v2 +ECOMMERCE_PUBLIC_URL_ROOT: http://localhost:8002 +EDXMKTG_USER_INFO_COOKIE_NAME: edx-user-info +EDX_PLATFORM_REVISION: master +ELASTIC_SEARCH_CONFIG: +- host: edx.devstack.elasticsearch710 + port: 9200 + use_ssl: false +EMAIL_BACKEND: 
django.core.mail.backends.smtp.EmailBackend +EMAIL_HOST: localhost +EMAIL_HOST_PASSWORD: '' +EMAIL_HOST_USER: '' +EMAIL_PORT: 25 +EMAIL_USE_TLS: false +ENABLE_COMPREHENSIVE_THEMING: false +ENTERPRISE_API_URL: http://edx.devstack.lms:18000/enterprise/api/v1 +ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: {} +ENTERPRISE_SERVICE_WORKER_USERNAME: enterprise_worker +EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: [] +EXTRA_MIDDLEWARE_CLASSES: [] +FACEBOOK_API_VERSION: v2.1 +FACEBOOK_APP_ID: FACEBOOK_APP_ID +FACEBOOK_APP_SECRET: FACEBOOK_APP_SECRET +FEATURES: + AUTOMATIC_AUTH_FOR_TESTING: false + CUSTOM_COURSES_EDX: false + ENABLE_COMBINED_LOGIN_REGISTRATION: true + ENABLE_CORS_HEADERS: false + ENABLE_COUNTRY_ACCESS: false + ENABLE_CREDIT_API: false + ENABLE_CREDIT_ELIGIBILITY: false + ENABLE_CROSS_DOMAIN_CSRF_COOKIE: false + ENABLE_CSMH_EXTENDED: true + ENABLE_DISCUSSION_HOME_PANEL: true + ENABLE_DISCUSSION_SERVICE: true + ENABLE_EDXNOTES: true + ENABLE_ENROLLMENT_RESET: false + ENABLE_GRADE_DOWNLOADS: true + ENABLE_MKTG_SITE: false + ENABLE_MOBILE_REST_API: false + ENABLE_OAUTH2_PROVIDER: false + ENABLE_PUBLISHER: false + ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: true + ENABLE_SPECIAL_EXAMS: false + ENABLE_SYSADMIN_DASHBOARD: false + ENABLE_THIRD_PARTY_AUTH: true + ENABLE_VIDEO_UPLOAD_PIPELINE: false + PREVIEW_LMS_BASE: preview.localhost:18000 + SHOW_FOOTER_LANGUAGE_SELECTOR: false + SHOW_HEADER_LANGUAGE_SELECTOR: false +FEEDBACK_SUBMISSION_EMAIL: '' +FERNET_KEYS: +- DUMMY KEY CHANGE BEFORE GOING TO PRODUCTION +FILE_UPLOAD_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. bucket-name) +FILE_UPLOAD_STORAGE_PREFIX: submissions_attachments +FINANCIAL_REPORTS: + BUCKET: null + ROOT_PATH: sandbox + STORAGE_TYPE: localfs +FOOTER_ORGANIZATION_IMAGE: images/logo.png +GITHUB_REPO_ROOT: /edx/var/edxapp/data +GIT_REPO_EXPORT_DIR: /edx/var/edxapp/export_course_repos +GOOGLE_ANALYTICS_ACCOUNT: null +GRADES_DOWNLOAD: + BUCKET: '' + ROOT_PATH: '' + STORAGE_CLASS: django.core.files.storage.FileSystemStorage + STORAGE_KWARGS: + location: /tmp/edx-s3/grades + STORAGE_TYPE: '' +HELP_TOKENS_BOOKS: + course_author: http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course + learner: http://edx.readthedocs.io/projects/open-edx-learner-guide +ICP_LICENSE: null +ICP_LICENSE_INFO: {} +IDA_LOGOUT_URI_LIST: [] +ID_VERIFICATION_SUPPORT_LINK: '' +JWT_AUTH: + JWT_AUDIENCE: lms-key + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUER: http://localhost:18000/oauth2 + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PRIVATE_SIGNING_JWK: None + JWT_PUBLIC_SIGNING_JWK_SET: '' + JWT_SECRET_KEY: lms-secret + JWT_SIGNING_ALGORITHM: null +JWT_EXPIRATION: 30 +JWT_ISSUER: http://localhost:18000/oauth2 +JWT_PRIVATE_SIGNING_KEY: null +LANGUAGE_CODE: en +LANGUAGE_COOKIE: openedx-language-preference +LEARNER_PORTAL_URL_ROOT: https://learner-portal-edx.devstack.lms:18000 +LMS_BASE: edx.devstack.lms:18000 +LMS_INTERNAL_ROOT_URL: http://edx.devstack.lms:18000 +LMS_ROOT_URL: http://edx.devstack.lms:18000 +LOCAL_LOGLEVEL: INFO +LOGGING_ENV: sandbox +LOGIN_REDIRECT_WHITELIST: [] +LOG_DIR: /edx/var/log/edx +MAINTENANCE_BANNER_TEXT: Sample banner message +MEDIA_ROOT: /edx/var/edxapp/media/ +MEDIA_URL: /media/ +MKTG_URLS: {} +MKTG_URL_LINK_MAP: {} +MOBILE_STORE_URLS: {} +MODULESTORE: + default: + ENGINE: xmodule.modulestore.mixed.MixedModuleStore + OPTIONS: + mappings: {} + stores: + - DOC_STORE_CONFIG: + 
auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore + NAME: split + OPTIONS: + default_class: xmodule.hidden_block.HiddenBlock + fs_root: /edx/var/edxapp/data + render_template: common.djangoapps.edxmako.shortcuts.render_to_string + - DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.mongo.DraftMongoModuleStore + NAME: draft + OPTIONS: + default_class: xmodule.hidden_block.HiddenBlock + fs_root: /edx/var/edxapp/data + render_template: common.djangoapps.edxmako.shortcuts.render_to_string +ORA2_FILE_PREFIX: default_env-default_deployment/ora2 +PARSE_KEYS: {} +PARTNER_SUPPORT_EMAIL: '' +PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: + ENFORCE_COMPLIANCE_ON_LOGIN: false +PASSWORD_RESET_SUPPORT_LINK: '' +PAYMENT_SUPPORT_EMAIL: billing@example.com +PLATFORM_DESCRIPTION: Your Platform Description Here +PLATFORM_FACEBOOK_ACCOUNT: http://www.facebook.com/YourPlatformFacebookAccount +PLATFORM_NAME: Your Platform Name Here +PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount' +POLICY_CHANGE_GRADES_ROUTING_KEY: edx.lms.core.default +SINGLE_LEARNER_COURSE_REGRADE_ROUTING_KEY: edx.lms.core.default +PRESS_EMAIL: press@example.com +PROCTORING_BACKENDS: + DEFAULT: 'null' + 'null': {} +PROCTORING_SETTINGS: {} +REGISTRATION_EXTRA_FIELDS: + city: hidden + confirm_email: hidden + country: required + gender: optional + goals: optional + honor_code: required + level_of_education: optional + mailing_address: hidden + terms_of_service: hidden + year_of_birth: optional +RETIRED_EMAIL_DOMAIN: retired.invalid +RETIRED_EMAIL_PREFIX: retired__user_ +RETIRED_USERNAME_PREFIX: retired__user_ +RETIRED_USER_SALTS: +- OVERRIDE ME WITH A RANDOM VALUE +- ROTATE SALTS BY APPENDING NEW VALUES +RETIREMENT_SERVICE_WORKER_USERNAME: retirement_worker +RETIREMENT_STATES: +- PENDING +- ERRORED +- ABORTED +- COMPLETE +SECRET_KEY: DUMMY KEY ONLY FOR TO DEVSTACK +SEGMENT_KEY: null +SERVER_EMAIL: sre@example.com +SESSION_COOKIE_DOMAIN: '' +SESSION_COOKIE_NAME: sessionid +SESSION_COOKIE_SECURE: false +SESSION_SAVE_EVERY_REQUEST: false +SITE_NAME: localhost +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '' +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT: {} +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: '' +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT: {} +SOCIAL_MEDIA_FOOTER_URLS: {} +SOCIAL_SHARING_SETTINGS: + CERTIFICATE_FACEBOOK: false + CERTIFICATE_TWITTER: false + CUSTOM_COURSE_URLS: false + DASHBOARD_FACEBOOK: false + DASHBOARD_TWITTER: false +STATIC_ROOT_BASE: /edx/var/edxapp/staticfiles +STATIC_URL_BASE: /static/ +STUDIO_NAME: Studio +STUDIO_SHORT_NAME: Studio +SUPPORT_SITE_LINK: '' +SWIFT_AUTH_URL: null +SWIFT_AUTH_VERSION: null +SWIFT_KEY: null +SWIFT_REGION_NAME: null +SWIFT_TEMP_URL_DURATION: 1800 +SWIFT_TEMP_URL_KEY: null +SWIFT_TENANT_ID: null +SWIFT_TENANT_NAME: null +SWIFT_USERNAME: null +SWIFT_USE_TEMP_URLS: false +SYSLOG_SERVER: '' +SYSTEM_WIDE_ROLE_CLASSES: [] +TECH_SUPPORT_EMAIL: technical@example.com +TIME_ZONE: America/New_York +UNIVERSITY_EMAIL: university@example.com +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME +VIDEO_IMAGE_MAX_AGE: 
31536000
+VIDEO_IMAGE_SETTINGS:
+  DIRECTORY_PREFIX: video-images/
+  STORAGE_KWARGS:
+    base_url: /media/
+    location: /edx/var/edxapp/media/
+  VIDEO_IMAGE_MAX_BYTES: 2097152
+  VIDEO_IMAGE_MIN_BYTES: 2048
+VIDEO_TRANSCRIPTS_MAX_AGE: 31536000
+VIDEO_TRANSCRIPTS_SETTINGS:
+  DIRECTORY_PREFIX: video-transcripts/
+  STORAGE_KWARGS:
+    base_url: /media/
+    location: /edx/var/edxapp/media/
+  VIDEO_TRANSCRIPTS_MAX_BYTES: 3145728
+VIDEO_UPLOAD_PIPELINE:
+  BUCKET: ''
+  ROOT_PATH: ''
+WIKI_ENABLED: true
+XBLOCK_FS_STORAGE_BUCKET: null
+XBLOCK_FS_STORAGE_PREFIX: null
+XBLOCK_SETTINGS: {}
+XQUEUE_INTERFACE:
+  basic_auth:
+  - edx
+  - edx
+  django_auth:
+    password: password
+    username: lms
+  url: http://edx.devstack.xqueue:18040
+X_FRAME_OPTIONS: DENY
+YOUTUBE_API_KEY: PUT_YOUR_API_KEY_HERE
+ZENDESK_API_KEY: ''
+ZENDESK_CUSTOM_FIELDS: {}
+ZENDESK_GROUP_ID_MAPPING: {}
+ZENDESK_OAUTH_ACCESS_TOKEN: ''
+ZENDESK_URL: ''
+ZENDESK_USER: ''
+
diff --git a/docker/build/elasticsearch-devstack/Dockerfile b/docker/build/elasticsearch-devstack/Dockerfile
new file mode 100644
index 00000000000..b851031f026
--- /dev/null
+++ b/docker/build/elasticsearch-devstack/Dockerfile
@@ -0,0 +1,7 @@
+# docker build -f docker/build/elasticsearch-devstack/Dockerfile . -t edxops/elasticsearch:devstack
+
+FROM elasticsearch:1.5.2
+LABEL maintainer="edxops"
+
+# Install the elasticsearch-head plugin (https://mobz.github.io/elasticsearch-head/)
+RUN /usr/share/elasticsearch/bin/plugin -install mobz/elasticsearch-head
diff --git a/docker/build/elasticsearch/Dockerfile b/docker/build/elasticsearch/Dockerfile
new file mode 100644
index 00000000000..c869014eccb
--- /dev/null
+++ b/docker/build/elasticsearch/Dockerfile
@@ -0,0 +1,13 @@
+FROM edxops/xenial-common:latest
+LABEL maintainer="edxops"
+
+ADD . /edx/app/edx_ansible/edx_ansible
+WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
+
+# Role is currently untagged
+RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook elasticsearch.yml -c local \
+    -i '127.0.0.1,'
+
+WORKDIR /etc/elasticsearch
+CMD service elasticsearch start && sleep 5 && tail -f /edx/var/log/elasticsearch/elasticsearch.log
+EXPOSE 9200 9300
diff --git a/docker/build/enterprise_catalog/Dockerfile b/docker/build/enterprise_catalog/Dockerfile
new file mode 100644
index 00000000000..b3caf4af29f
--- /dev/null
+++ b/docker/build/enterprise_catalog/Dockerfile
@@ -0,0 +1,32 @@
+# To build this Dockerfile:
+#
+# From the root of configuration:
+#
+# docker build -f docker/build/enterprise_catalog/Dockerfile .
+#
+# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible
+# with the currently checked-out configuration repo.
+
+ARG BASE_IMAGE_TAG=latest
+FROM edxops/xenial-common:${BASE_IMAGE_TAG}
+LABEL maintainer="edxops"
+USER root
+CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
+
+ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/enterprise_catalog/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/enterprise_catalog/enterprise_catalog.yml /edx/etc/enterprise_catalog.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook enterprise_catalog.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="ENTERPRISE_CATALOG_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18160 diff --git a/docker/build/enterprise_catalog/ansible_overrides.yml b/docker/build/enterprise_catalog/ansible_overrides.yml new file mode 100644 index 00000000000..3c1583c7313 --- /dev/null +++ b/docker/build/enterprise_catalog/ansible_overrides.yml @@ -0,0 +1,14 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ ENTERPRISE_CATALOG_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ ENTERPRISE_CATALOG_MYSQL_PASSWORD }}' + +ENTERPRISE_CATALOG_MYSQL_HOST: 'edx.devstack.mysql' +ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE: 'enterprise_catalog.settings.devstack' +ENTERPRISE_CATALOG_GUNICORN_EXTRA: '--reload' +ENTERPRISE_CATALOG_MEMCACHE: ['edx.devstack.memcached:11211'] +ENTERPRISE_CATALOG_EXTRA_APPS: [] +ENTERPRISE_CATALOG_URL_ROOT: 'http://enterprise-catalog:18160/' + +edx_django_service_is_devstack: true diff --git a/docker/build/enterprise_catalog/enterprise_catalog.yml b/docker/build/enterprise_catalog/enterprise_catalog.yml new file mode 100644 index 00000000000..89cc293c26d --- /dev/null +++ b/docker/build/enterprise_catalog/enterprise_catalog.yml @@ -0,0 +1,62 @@ +--- + +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: enterprise-catalog-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: enterprise-catalog-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: enterprise_catalog + LOCATION: + - edx.devstack.memcached:11211 +CELERY_ALWAYS_EAGER: false +CELERY_BROKER_HOSTNAME: '' +CELERY_BROKER_PASSWORD: '' +CELERY_BROKER_TRANSPORT: '' +CELERY_BROKER_USER: '' +CELERY_BROKER_VHOST: '' +CELERY_DEFAULT_EXCHANGE: enterprise_catalog +CELERY_DEFAULT_QUEUE: enterprise_catalog.default +CELERY_DEFAULT_ROUTING_KEY: enterprise_catalog +CERTIFICATE_LANGUAGES: null +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: false + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: enterprise_catalog + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: entcatalog001 +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +ENTERPRISE_CATALOG_SERVICE_USER: enterprise_catalog_service_user +EXTRA_APPS: [] +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +MEDIA_STORAGE_BACKEND: '' +SECRET_KEY: SET-ME-PLEASE +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 
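+# The blank CELERY_BROKER_* values earlier in this file leave the Celery
+# broker URL unset; a purely illustrative sketch (transport, host and
+# credentials are all assumptions, not values from this repo):
+# CELERY_BROKER_TRANSPORT: redis
+# CELERY_BROKER_HOSTNAME: edx.devstack.redis:6379
+# CELERY_BROKER_USER: ''
+# CELERY_BROKER_PASSWORD: password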
+SOCIAL_AUTH_EDX_OAUTH2_KEY: enterprise-catalog-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: enterprise-catalog-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage +STATIC_ROOT: /edx/var/enterprise_catalog/staticfiles +TIME_ZONE: UTC diff --git a/docker/build/firefox/Dockerfile b/docker/build/firefox/Dockerfile new file mode 100644 index 00000000000..043a086589d --- /dev/null +++ b/docker/build/firefox/Dockerfile @@ -0,0 +1,20 @@ +FROM selenium/standalone-firefox-debug:3.14.0-arsenic +LABEL maintainer="edxops" + +USER root + +# Install a password generator and the codecs needed to support mp4 video in Firefox +RUN apt-get update -qqy \ + && apt-get -qqy install \ + gstreamer1.0-libav \ + pwgen \ + && rm -rf /var/lib/apt/lists/* /var/cache/apt/* + +USER seluser + +CMD export VNC_PASSWORD=$(pwgen -s -1 $(shuf -i 10-20 -n 1)) \ + && x11vnc -storepasswd $VNC_PASSWORD /home/seluser/.vnc/passwd \ + && echo "Firefox VNC password: $VNC_PASSWORD" \ + && /opt/bin/entry_point.sh + +EXPOSE 4444 5900 diff --git a/docker/build/flower/Dockerfile b/docker/build/flower/Dockerfile new file mode 100644 index 00000000000..93a23d99e9b --- /dev/null +++ b/docker/build/flower/Dockerfile @@ -0,0 +1,25 @@ +FROM ubuntu:focal + +# Update and get pip. +RUN apt-get update && apt-get install -y python3-pip + +# Install the required packages +RUN pip3 install --no-cache-dir celery==5.2.3 flower==1.0.0 redis==4.1.1 + +# PYTHONUNBUFFERED: Force stdin, stdout and stderr to be totally unbuffered. (equivalent to `python -u`) +# PYTHONHASHSEED: Enable hash randomization (equivalent to `python -R`) +# PYTHONDONTWRITEBYTECODE: Do not write byte files to disk, since we maintain it as readonly. (equivalent to `python -B`) +ENV PYTHONUNBUFFERED=1 PYTHONHASHSEED=random PYTHONDONTWRITEBYTECODE=1 + +# Default port +EXPOSE 5555 + +RUN apt-get install bash -qy + +# Run as a non-root user by default, run as user with least privileges. +USER nobody + +# Mount a config here if you want to enable OAuth etc +ADD docker/build/flower/flowerconfig.py /flowerconfig.py + +ENTRYPOINT [ "celery" ] diff --git a/docker/build/flower/README.txt b/docker/build/flower/README.txt new file mode 100644 index 00000000000..0104a734622 --- /dev/null +++ b/docker/build/flower/README.txt @@ -0,0 +1,9 @@ +Example: +$ docker build . 
-t edxops/flower:latest +$ docker run -it --rm -p 127.0.0.1:5555:5555 edxops/flower:latest --broker=redis://:@some-redis-url.com:6379 flower --conf=flowerconfig.py + +$ curl localhost:5555 + + +Example with oauth: +docker run -it --rm -p 127.0.0.1:5555:5555 -e OAUTH2_KEY="xxxyyy.apps.googleusercontent.com" -e OAUTH2_SECRET="xxxxx" -e OAUTH2_REDIRECT_URI="flower-url.com/login" -e AUTH=".*@domain.org" edxops/flower:latest flower --broker=redis://myuser:mypass@my-redis.com:6379 diff --git a/docker/build/flower/flowerconfig.py b/docker/build/flower/flowerconfig.py new file mode 100644 index 00000000000..077667112ea --- /dev/null +++ b/docker/build/flower/flowerconfig.py @@ -0,0 +1,9 @@ +import os + +address = os.getenv('ADDRESS', "0.0.0.0") +port = os.getenv('PORT', 5555) + +oauth2_key = os.getenv('OAUTH2_KEY', None) +oauth2_secret = os.getenv('OAUTH2_SECRET', None) +oauth2_redirect_uri = os.getenv('OAUTH2_REDIRECT_URI', None) +auth = os.getenv('AUTH', None) diff --git a/docker/build/focal-common/Dockerfile b/docker/build/focal-common/Dockerfile new file mode 100644 index 00000000000..e999496cee6 --- /dev/null +++ b/docker/build/focal-common/Dockerfile @@ -0,0 +1,30 @@ +ARG BASE_IMAGE_TAG=latest +FROM ubuntu:focal +LABEL maintainer="edxops" + +# Set locale to UTF-8 which is not the default for docker. +# See the links for details: +# http://jaredmarkell.com/docker-and-locales/ +# https://github.com/docker-library/python/issues/13 +# https://github.com/docker-library/python/pull/14/files +# Also install software-properties-common to get apt-add-repository +# Also install iproute2 because ansible needs it for the ansible_default_ipv4.address fact +RUN apt-get update &&\ + apt-get install -y cron iproute2 locales software-properties-common &&\ + locale-gen en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +ENV CONFIGURATION_REPO="https://github.com/openedx/configuration.git" +ARG OPENEDX_RELEASE=master +ENV CONFIGURATION_VERSION="${OPENEDX_RELEASE}" + +# Add the deadsnakes PPA to install Python 3.8 +RUN apt-add-repository -y ppa:deadsnakes/ppa +RUN apt-get update &&\ + apt-get install -y python3.8-dev python3.8-distutils + +ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh +RUN chmod +x /tmp/ansible-bootstrap.sh +RUN /tmp/ansible-bootstrap.sh diff --git a/docker/build/forum/Dockerfile b/docker/build/forum/Dockerfile new file mode 100644 index 00000000000..900eeb7948f --- /dev/null +++ b/docker/build/forum/Dockerfile @@ -0,0 +1,28 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/forum/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays +ADD . 
/edx/app/edx_ansible/edx_ansible +COPY docker/build/forum/ansible_overrides.yml / + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook forum.yml \ + -i '127.0.0.1,' -c local \ + -t "install:base,install:configuration,install:app-requirements,install:code,devstack:install" \ + --extra-vars="FORUM_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" +WORKDIR /edx/app +ENTRYPOINT ["/edx/app/forum/devstack.sh"] +CMD ["start"] +EXPOSE 4567 diff --git a/docker/build/forum/ansible_overrides.yml b/docker/build/forum/ansible_overrides.yml new file mode 100644 index 00000000000..95475f54656 --- /dev/null +++ b/docker/build/forum/ansible_overrides.yml @@ -0,0 +1,13 @@ +--- + +FLOCK_TLD: "edx" + +FORUM_MONGO_HOSTS: + - mongo.{{ FLOCK_TLD }} + +FORUM_ELASTICSEARCH_HOST: "es.{{ FLOCK_TLD }}" +FORUM_USE_TCP: "true" +FORUM_RACK_ENV: "staging" +FORUM_SINATRA_ENV: "staging" + +devstack: "true" diff --git a/docker/build/github-actions-runner/Dockerfile b/docker/build/github-actions-runner/Dockerfile new file mode 100644 index 00000000000..b4d6a5a65e9 --- /dev/null +++ b/docker/build/github-actions-runner/Dockerfile @@ -0,0 +1,53 @@ +FROM ubuntu:focal + +# Set the github runner version +ARG RUNNER_VERSION="2.304.0" + +ENV GITHUB_ORGANIZATION="" +ENV GITHUB_ACCESS_TOKEN="" + +# Update and install necessary packages +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl iputils-ping jq software-properties-common build-essential libssl-dev \ + libffi-dev python3 python3-venv python3-dev libmysqlclient-dev yamllint + +# Add a github action runner user +RUN useradd -m actions-runner + +# cd into the user directory, download and unzip the github actions runner +RUN cd /home/actions-runner && mkdir actions-runner && cd actions-runner \ + && curl -O -L https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz \ + && tar xzf ./actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz \ + && ./bin/installdependencies.sh \ + && chown -R actions-runner ~actions-runner \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +# Install sudo to execute commands with root privileges +RUN apt-get update && apt-get install -y sudo + +# Install packages needed for Docker installation +RUN apt-get update && apt-get install -y apt-transport-https gnupg-agent + +# Add Docker's GPG key and repository +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - && \ + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" + +# Install Docker - for Docker CI running by github action runner +RUN apt-get update && apt-get install -y docker-ce + +# Add the github actions-runner user to the docker group to allow the github action runner user to run docker commands. 
+# See: https://docs.docker.com/engine/installation/linux/ubuntulinux/
+RUN usermod -aG docker actions-runner
+
+# Copy over the actions-runner.sh script
+ADD docker/build/github-actions-runner/actions-runner.sh /actions-runner.sh
+
+# Make the script executable
+RUN chmod +x actions-runner.sh
+
+# Set the user to "actions-runner" so all subsequent commands are run as the actions-runner user
+USER actions-runner
+
+# Set the entrypoint to the actions-runner.sh script
+ENTRYPOINT ["./actions-runner.sh"]
diff --git a/docker/build/github-actions-runner/README.rst b/docker/build/github-actions-runner/README.rst
new file mode 100644
index 00000000000..06f3fb9ab69
--- /dev/null
+++ b/docker/build/github-actions-runner/README.rst
@@ -0,0 +1,11 @@
+Usage
+#####
+
+Create image:
+
+ - This must be run from the root of the configuration repository
+ - ``docker build -f docker/build/github-actions-runner/Dockerfile . -t openedx/github-actions-runner``
+
+Start the container with this:
+
+``docker run -ti -v /var/run/docker.sock:/var/run/docker.sock -e GITHUB_ACCESS_TOKEN=xxxxxxxx -e GITHUB_ORGANIZATION=abc openedx/github-actions-runner``
diff --git a/docker/build/github-actions-runner/actions-runner.sh b/docker/build/github-actions-runner/actions-runner.sh
new file mode 100644
index 00000000000..602a30af4e9
--- /dev/null
+++ b/docker/build/github-actions-runner/actions-runner.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Fail fast if the required environment variables were not passed in via `docker run -e`.
+: "${GITHUB_ORGANIZATION:?GITHUB_ORGANIZATION must be set}"
+: "${GITHUB_ACCESS_TOKEN:?GITHUB_ACCESS_TOKEN must be set}"
+
+RUNNER_TOKEN=$(curl -sX POST -H "Authorization: token ${GITHUB_ACCESS_TOKEN}" https://api.github.com/orgs/${GITHUB_ORGANIZATION}/actions/runners/registration-token | jq .token --raw-output)
+
+cd /home/actions-runner/actions-runner
+
+./config.sh --url https://github.com/${GITHUB_ORGANIZATION} --token ${RUNNER_TOKEN} --unattended --replace
+
+cleanup() {
+    echo "Removing runner..."
+    ./config.sh remove --unattended --token ${RUNNER_TOKEN}
+}
+
+trap 'cleanup; exit 130' INT
+trap 'cleanup; exit 143' TERM
+
+./bin/runsvc.sh & wait $!
diff --git a/docker/build/graphite/Dockerfile b/docker/build/graphite/Dockerfile
new file mode 100644
index 00000000000..33b89792bb0
--- /dev/null
+++ b/docker/build/graphite/Dockerfile
@@ -0,0 +1,9 @@
+FROM edxops/xenial-common:latest
+LABEL maintainer="edxops"
+
+USER root
+ADD . /edx/app/edx_ansible/edx_ansible
+WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
+
+RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook edx-monitoring.yml -c local \
+    -i '127.0.0.1,'
diff --git a/docker/build/jenkins_analytics/Dockerfile.noci b/docker/build/jenkins_analytics/Dockerfile.noci
new file mode 100644
index 00000000000..a5c4c1fc00b
--- /dev/null
+++ b/docker/build/jenkins_analytics/Dockerfile.noci
@@ -0,0 +1,10 @@
+FROM edxops/xenial-common:latest
+LABEL maintainer="edxops"
+
+USER root
+RUN apt-get update
+
+ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays +COPY docker/build/jenkins_analytics/ansible_overrides.yml / +RUN PYTHONUNBUFFERED=1 /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook -v jenkins_analytics.yml -i '127.0.0.1,' -c local -e@/ansible_overrides.yml diff --git a/docker/build/jenkins_analytics/ansible_overrides.yml b/docker/build/jenkins_analytics/ansible_overrides.yml new file mode 100644 index 00000000000..9e2899cd8cf --- /dev/null +++ b/docker/build/jenkins_analytics/ansible_overrides.yml @@ -0,0 +1,28 @@ +--- + +JENKINS_ANALYTICS_USER_PASSWORD_PLAIN: aaaaa + +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER: 'aaa-secure' +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY: | + -----BEGIN RSA PRIVATE KEY----- + -----END RSA PRIVATE KEY----- + +ANALYTICS_SCHEDULE_COMMON_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/job-configs/common.yml" +ANALYTICS_SCHEDULE_COURSE_ACTIVITY_WEEKLY_EXTRA_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/job-configs/course_activity.yml" +ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION_EXTRA_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/job-configs/answer_distribution.yml" +ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/job-configs/enrollments.yml" +ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_ALL_VIDEO_EXTRA_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/job-configs/video.yml" +ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY_EXTRA_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/job-configs/geolocation.yml" + + +#ANALYTICS_SCHEDULE_JOBS_DSL_REPO_URL: "we use the default, but might need to override it someday" +#ANALYTICS_SCHEDULE_JOBS_DSL_REPO_VERSION: "master" + +ANALYTICS_SCHEDULE_SECURE_REPO_URL: "git@github.com:something/something-secure.git" +#ANALYTICS_SCHEDULE_SECURE_REPO_VERSION: "master" # we use the default, but for testing this is helpful to override + +ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_KEY: | + -----BEGIN RSA PRIVATE KEY----- + -----END RSA PRIVATE KEY----- + +JENKINS_ANALYTICS_AUTH_REALM: unix diff --git a/docker/build/mongo/Dockerfile b/docker/build/mongo/Dockerfile new file mode 100644 index 00000000000..520014793a9 --- /dev/null +++ b/docker/build/mongo/Dockerfile @@ -0,0 +1,15 @@ +FROM edxops/xenial-common:latest +LABEL maintainer="edxops" + +ADD . 
diff --git a/docker/build/mongo/Dockerfile b/docker/build/mongo/Dockerfile new file mode 100644 index 00000000000..520014793a9 --- /dev/null +++ b/docker/build/mongo/Dockerfile @@ -0,0 +1,15 @@ +FROM edxops/xenial-common:latest +LABEL maintainer="edxops" + +ADD . /edx/app/edx_ansible/edx_ansible +COPY docker/build/mongo/ansible_overrides.yml / + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook mongo.yml \ + -i '127.0.0.1,' -c local \ + -t 'install' \ + -e@/ansible_overrides.yml + +WORKDIR /edx/app +EXPOSE 27017 diff --git a/docker/build/mongo/ansible_overrides.yml b/docker/build/mongo/ansible_overrides.yml new file mode 100644 index 00000000000..0967ef424bc --- /dev/null +++ b/docker/build/mongo/ansible_overrides.yml @@ -0,0 +1 @@ +{} diff --git a/docker/build/mysql/Dockerfile b/docker/build/mysql/Dockerfile new file mode 100644 index 00000000000..28f8551ce71 --- /dev/null +++ b/docker/build/mysql/Dockerfile @@ -0,0 +1,108 @@ +FROM debian:buster-slim + +# add gosu for easy step-down from root +# https://github.com/tianon/gosu/releases +ENV GOSU_VERSION 1.14 + +ENV MYSQL_MAJOR 5.7 +ENV MYSQL_MINOR ${MYSQL_MAJOR}.39 +ENV MYSQL_VERSION 5.7.39-1debian10 + +RUN set -eux; \ + # add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added + groupadd -r mysql && useradd -r -g mysql mysql; \ + export DEBIAN_FRONTEND=noninteractive; \ + apt-get update && apt-get install -y --no-install-recommends gnupg && rm -rf /var/lib/apt/lists/*; \ + savedAptMark="$(apt-mark showmanual)"; \ + apt-get update; \ + apt-get install -y --no-install-recommends ca-certificates wget; \ + rm -rf /var/lib/apt/lists/*; \ + dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')"; \ + wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$%7BGOSU_VERSION%7D/gosu-$dpkgArch"; \ + wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$%7BGOSU_VERSION%7D/gosu-$dpkgArch.asc"; \ + export GNUPGHOME="$(mktemp -d)"; \ + gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4; \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu; \ + gpgconf --kill all; \ + rm -rf "$GNUPGHOME" /usr/local/bin/gosu.asc; \ + apt-mark auto '.*' > /dev/null; \ + [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark > /dev/null; \ + apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + chmod +x /usr/local/bin/gosu; \ + gosu --version; \ + gosu nobody true; \ + mkdir /docker-entrypoint-initdb.d; \ + DEVELOPMENT_PACKAGES="pkg-config libssl-dev dpkg-dev devscripts equivs"; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ + bzip2 \ + openssl \ + # FATAL ERROR: please install the following Perl modules before executing /usr/local/mysql/scripts/mysql_install_db: + # File::Basename + # File::Copy + # Sys::Hostname + # Data::Dumper + perl \ + xz-utils \ + zstd \ + ${DEVELOPMENT_PACKAGES}; \ + rm -rf /var/lib/apt/lists/*; \ + # gpg: key 3A79BD29: public key "MySQL Release Engineering <mysql-build@oss.oracle.com>" imported + key='859BE8D7C586F538430B19C2467B942D3A79BD29'; \ + export GNUPGHOME="$(mktemp -d)"; \ + gpg --batch --keyserver keyserver.ubuntu.com --recv-keys "$key"; \ + mkdir -p /etc/apt/keyrings; \ + gpg --batch --export "$key" > /etc/apt/keyrings/mysql.gpg; \ + gpgconf --kill all; \ + rm -rf "$GNUPGHOME"; \ + # RUN echo 'deb [ signed-by=/etc/apt/keyrings/mysql.gpg ] http://repo.mysql.com/apt/debian/ buster mysql-5.7' > /etc/apt/sources.list.d/mysql.list + echo 'deb [ signed-by=/etc/apt/keyrings/mysql.gpg ] http://repo.mysql.com/apt/debian/ buster mysql-5.7' > /etc/apt/sources.list.d/mysql.list; \ + echo 'deb-src [ 
signed-by=/etc/apt/keyrings/mysql.gpg ] http://repo.mysql.com/apt/debian/ buster mysql-5.7' >> /etc/apt/sources.list.d/mysql.list; \ +# the "/var/lib/mysql" stuff here is because the mysql-server postinst doesn't have an explicit way to disable the mysql_install_db codepath besides having a database already "configured" (ie, stuff in /var/lib/mysql/mysql) +# also, we set debconf keys to make APT a little quieter + { \ + echo mysql-community-server mysql-community-server/data-dir select ''; \ + echo mysql-community-server mysql-community-server/root-pass password ''; \ + echo mysql-community-server mysql-community-server/re-root-pass password ''; \ + echo mysql-community-server mysql-community-server/remove-test-db select false; \ + } | debconf-set-selections; \ + apt-get update; \ + mk-build-deps mysql-community-server --install --tool "apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends -y"; \ + mkdir /usr/src/mysql; \ + cd /usr/src/mysql; \ + apt source mysql-community-server; \ + cd mysql-community-*; \ + sed -i -e "s/\-DWITH_BOOST\=\/usr\/global\/share/\-DWITH_BOOST\=\/tmp\/boost \-DDOWNLOAD_BOOST\=1/g" debian/rules; \ + sed -i -e "s/\-DWITH_SSL\=.* /-DWITH_SSL\=system /g" debian/rules; \ + debuild -b -uc -us; \ + cd ~; \ + dpkg --install \ + /usr/src/mysql/mysql-common_*.deb \ + /usr/src/mysql/mysql-community-server_*.deb \ + /usr/src/mysql/mysql-community-client_*.deb \ + /usr/src/mysql/mysql-server_*.deb \ + /usr/src/mysql/mysql-client_*.deb; \ + rm -rf /usr/src/mysql /tmp/boost /mysql-community-build-deps*deb; \ + apt-get remove --purge -y mysql-community-build-deps ${DEVELOPMENT_PACKAGES}; \ + apt-get -y autoremove; \ + apt-get clean all; \ + # comment out a few problematic configuration values + find /etc/mysql/ -name '*.cnf' -print0 \ + | xargs -0 grep -lZE '^(bind-address|log)' \ + | xargs -rt -0 sed -Ei 's/^(bind-address|log)/#&/'; \ + # don't reverse lookup hostnames, they are usually another container + echo '[mysqld]\nskip-host-cache\nskip-name-resolve' > /etc/mysql/conf.d/docker.cnf; \ + rm -rf /var/lib/apt/lists/*; \ + rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql /var/run/mysqld; \ + chown -R mysql:mysql /var/lib/mysql /var/run/mysqld; \ + # ensure that /var/run/mysqld (used for socket and lock files) is writable regardless of the UID our mysqld instance ends up having at runtime + chmod 1777 /var/run/mysqld /var/lib/mysql + +VOLUME /var/lib/mysql + +COPY docker-entrypoint.sh /usr/local/bin/ +RUN ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat +ENTRYPOINT ["docker-entrypoint.sh"] + +EXPOSE 3306 33060 +CMD ["mysqld"] diff --git a/docker/build/mysql/ansible_overrides.yml b/docker/build/mysql/ansible_overrides.yml new file mode 100644 index 00000000000..ba2d8bcbbc7 --- /dev/null +++ b/docker/build/mysql/ansible_overrides.yml @@ -0,0 +1,3 @@ +--- +FLOCK_TLD: "edx" + diff --git a/docker/build/mysql/docker-entrypoint.sh b/docker/build/mysql/docker-entrypoint.sh new file mode 100644 index 00000000000..5819a38e93a --- /dev/null +++ b/docker/build/mysql/docker-entrypoint.sh @@ -0,0 +1,437 @@ +#!/bin/bash +set -eo pipefail +shopt -s nullglob + +# logging functions +mysql_log() { + local type="$1"; shift + # accept argument string or stdin + local text="$*"; if [ "$#" -eq 0 ]; then text="$(cat)"; fi + local dt; dt="$(date --rfc-3339=seconds)" + printf '%s [%s] [Entrypoint]: %s\n' "$dt" "$type" "$text" +} +mysql_note() { + mysql_log Note "$@" +} +mysql_warn() { + mysql_log Warn "$@" >&2 +} +mysql_error() { + mysql_log ERROR "$@" >&2 + exit 
1 +} + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + mysql_error "Both $var and $fileVar are set (but are exclusive)" + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +# check to see if this file is being run or sourced from another script +_is_sourced() { + # https://unix.stackexchange.com/a/215279 + [ "${#FUNCNAME[@]}" -ge 2 ] \ + && [ "${FUNCNAME[0]}" = '_is_sourced' ] \ + && [ "${FUNCNAME[1]}" = 'source' ] +} + +# usage: docker_process_init_files [file [file [...]]] +# ie: docker_process_init_files /always-initdb.d/* +# process initializer files, based on file extensions +docker_process_init_files() { + # mysql here for backwards compatibility "${mysql[@]}" + mysql=( docker_process_sql ) + + echo + local f + for f; do + case "$f" in + *.sh) + # https://github.com/docker-library/postgres/issues/450#issuecomment-393167936 + # https://github.com/docker-library/postgres/pull/452 + if [ -x "$f" ]; then + mysql_note "$0: running $f" + "$f" + else + mysql_note "$0: sourcing $f" + . "$f" + fi + ;; + *.sql) mysql_note "$0: running $f"; docker_process_sql < "$f"; echo ;; + *.sql.bz2) mysql_note "$0: running $f"; bunzip2 -c "$f" | docker_process_sql; echo ;; + *.sql.gz) mysql_note "$0: running $f"; gunzip -c "$f" | docker_process_sql; echo ;; + *.sql.xz) mysql_note "$0: running $f"; xzcat "$f" | docker_process_sql; echo ;; + *.sql.zst) mysql_note "$0: running $f"; zstd -dc "$f" | docker_process_sql; echo ;; + *) mysql_warn "$0: ignoring $f" ;; + esac + echo + done +} + +# arguments necessary to run "mysqld --verbose --help" successfully (used for testing configuration validity and for extracting default/configured values) +_verboseHelpArgs=( + --verbose --help + --log-bin-index="$(mktemp -u)" # https://github.com/docker-library/mysql/issues/136 +) + +mysql_check_config() { + local toRun=( "$@" "${_verboseHelpArgs[@]}" ) errors + if ! 
errors="$("${toRun[@]}" 2>&1 >/dev/null)"; then + mysql_error $'mysqld failed while attempting to check config\n\tcommand was: '"${toRun[*]}"$'\n\t'"$errors" + fi +} + +# Fetch value from server config +# We use mysqld --verbose --help instead of my_print_defaults because the +# latter only shows values present in config files, and not server defaults +mysql_get_config() { + local conf="$1"; shift + "$@" "${_verboseHelpArgs[@]}" 2>/dev/null \ + | awk -v conf="$conf" '$1 == conf && /^[^ \t]/ { sub(/^[^ \t]+[ \t]+/, ""); print; exit }' + # match "datadir /some/path with/spaces in/it here" but not "--xyz=abc\n datadir (xyz)" +} + +# Ensure that the package default socket can also be used +# since rpm packages are compiled with a different socket location +# and "mysqlsh --mysql" doesn't read the [client] config +# related to https://github.com/docker-library/mysql/issues/829 +mysql_socket_fix() { + local defaultSocket + defaultSocket="$(mysql_get_config 'socket' mysqld --no-defaults)" + if [ "$defaultSocket" != "$SOCKET" ]; then + ln -sfTv "$SOCKET" "$defaultSocket" || : + fi +} + +# Do a temporary startup of the MySQL server, for init purposes +docker_temp_server_start() { + if [ "${MYSQL_MAJOR}" = '5.7' ]; then + "$@" --skip-networking --default-time-zone=SYSTEM --socket="${SOCKET}" & + mysql_note "Waiting for server startup" + local i + for i in {30..0}; do + # only use the root password if the database has already been initialized + # so that it won't try to fill in a password file when it hasn't been set yet + extraArgs=() + if [ -z "$DATABASE_ALREADY_EXISTS" ]; then + extraArgs+=( '--dont-use-mysql-root-password' ) + fi + if docker_process_sql "${extraArgs[@]}" --database=mysql <<<'SELECT 1' &> /dev/null; then + break + fi + sleep 1 + done + if [ "$i" = 0 ]; then + mysql_error "Unable to start server." + fi + else + # For 8.0+ the server is ready for use as soon as the startup command unblocks + if ! "$@" --daemonize --skip-networking --default-time-zone=SYSTEM --socket="${SOCKET}"; then + mysql_error "Unable to start server." + fi + fi +} + +# Stop the server. When using a local socket file mysqladmin will block until +# the shutdown is complete. +docker_temp_server_stop() { + if ! mysqladmin --defaults-extra-file=<( _mysql_passfile ) shutdown -uroot --socket="${SOCKET}"; then + mysql_error "Unable to shut down server." + fi +}
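The file_env() helper earlier in this script implements the conventional VAR/VAR_FILE indirection, so any of the MYSQL_* settings consumed below can come from a mounted file instead of a plain environment variable. A sketch of exercising it, with an illustrative image tag and secret path:

    echo 'not-a-real-password' > /tmp/mysql-root-pw
    docker run -d \
        -v /tmp/mysql-root-pw:/run/secrets/mysql-root-pw:ro \
        -e MYSQL_ROOT_PASSWORD_FILE=/run/secrets/mysql-root-pw \
        edxops/mysql:latest  # illustrative tag for an image built from this Dockerfile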
+# Verify that the minimally required password settings are set for new databases. +docker_verify_minimum_env() { + if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then + mysql_error <<-'EOF' + Database is uninitialized and password option is not specified + You need to specify one of the following: + - MYSQL_ROOT_PASSWORD + - MYSQL_ALLOW_EMPTY_PASSWORD + - MYSQL_RANDOM_ROOT_PASSWORD + EOF + fi + + # This will prevent the CREATE USER from failing (and thus exiting with a half-initialized database) + if [ "$MYSQL_USER" = 'root' ]; then + mysql_error <<-'EOF' + MYSQL_USER="root", MYSQL_USER and MYSQL_PASSWORD are for configuring a regular user and cannot be used for the root user + Remove MYSQL_USER="root" and use one of the following to control the root user password: + - MYSQL_ROOT_PASSWORD + - MYSQL_ALLOW_EMPTY_PASSWORD + - MYSQL_RANDOM_ROOT_PASSWORD + EOF + fi + + # warn when missing one of MYSQL_USER or MYSQL_PASSWORD + if [ -n "$MYSQL_USER" ] && [ -z "$MYSQL_PASSWORD" ]; then + mysql_warn 'MYSQL_USER specified, but missing MYSQL_PASSWORD; MYSQL_USER will not be created' + elif [ -z "$MYSQL_USER" ] && [ -n "$MYSQL_PASSWORD" ]; then + mysql_warn 'MYSQL_PASSWORD specified, but missing MYSQL_USER; MYSQL_PASSWORD will be ignored' + fi +} + +# creates folders for the database +# also ensures permission for user mysql if run as root +docker_create_db_directories() { + local user; user="$(id -u)" + + local -A dirs=( ["$DATADIR"]=1 ) + local dir + dir="$(dirname "$SOCKET")" + dirs["$dir"]=1 + + # "datadir" and "socket" are already handled above (since they were already queried previously) + local conf + for conf in \ + general-log-file \ + keyring_file_data \ + pid-file \ + secure-file-priv \ + slow-query-log-file \ + ; do + dir="$(mysql_get_config "$conf" "$@")" + + # skip empty values + if [ -z "$dir" ] || [ "$dir" = 'NULL' ]; then + continue + fi + case "$conf" in + secure-file-priv) + # already points at a directory + ;; + *) + # other config options point at a file, but we need the directory + dir="$(dirname "$dir")" + ;; + esac + + dirs["$dir"]=1 + done + + mkdir -p "${!dirs[@]}" + + if [ "$user" = "0" ]; then + # this will cause less disk access than `chown -R` + find "${!dirs[@]}" \! -user mysql -exec chown --no-dereference mysql '{}' + + fi +} + +# initializes the database directory +docker_init_database_dir() { + mysql_note "Initializing database files" + "$@" --initialize-insecure --default-time-zone=SYSTEM + mysql_note "Database files initialized" +} + +# Loads various settings that are used elsewhere in the script +# This should be called after mysql_check_config, but before any other functions +docker_setup_env() { + # Get config + declare -g DATADIR SOCKET + DATADIR="$(mysql_get_config 'datadir' "$@")" + SOCKET="$(mysql_get_config 'socket' "$@")" + + # Initialize values that might be stored in a file + file_env 'MYSQL_ROOT_HOST' '%' + file_env 'MYSQL_DATABASE' + file_env 'MYSQL_USER' + file_env 'MYSQL_PASSWORD' + file_env 'MYSQL_ROOT_PASSWORD' + + declare -g DATABASE_ALREADY_EXISTS + if [ -d "$DATADIR/mysql" ]; then + DATABASE_ALREADY_EXISTS='true' + fi +} + +# Execute sql script, passed via stdin +# usage: docker_process_sql [--dont-use-mysql-root-password] [mysql-cli-args] +# ie: docker_process_sql --database=mydb <<<'INSERT ...' +# ie: docker_process_sql --dont-use-mysql-root-password --database=mydb <my-file.sql + + docker_init_database_dir "$@" + + mysql_note "Starting temporary server" + docker_temp_server_start "$@" + mysql_note "Temporary server started."
+ + mysql_socket_fix + docker_setup_db + docker_process_init_files /docker-entrypoint-initdb.d/* + + mysql_expire_root_user + + mysql_note "Stopping temporary server" + docker_temp_server_stop + mysql_note "Temporary server stopped" + + echo + mysql_note "MySQL init process done. Ready for start up." + echo + else + mysql_socket_fix + fi + fi + exec "$@" +} + +# If we are sourced from elsewhere, don't perform any further actions +if ! _is_sourced; then + _main "$@" +fi diff --git a/docker/build/nginx/Dockerfile b/docker/build/nginx/Dockerfile new file mode 100644 index 00000000000..5120184c4a5 --- /dev/null +++ b/docker/build/nginx/Dockerfile @@ -0,0 +1,18 @@ +FROM edxops/xenial-common:latest +LABEL maintainer="edxops" + +USER root +ADD . /edx/app/edx_ansible/edx_ansible +COPY docker/build/nginx/ansible_overrides.yml / +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook nginx.yml -c local \ + -i '127.0.0.1,' \ + -e@roles/edxapp/defaults/main.yml \ + -e@roles/xqueue/defaults/main.yml \ + -e@roles/forum/defaults/main.yml + +RUN echo "\ndaemon off;" >> /etc/nginx/nginx.conf +WORKDIR /etc/nginx +CMD ["/usr/sbin/nginx"] +EXPOSE 18000 48000 18010 48010 18020 diff --git a/docker/build/nginx/ansible_overrides.yml b/docker/build/nginx/ansible_overrides.yml new file mode 100644 index 00000000000..70206d6a9a0 --- /dev/null +++ b/docker/build/nginx/ansible_overrides.yml @@ -0,0 +1,3 @@ +--- + +FLOCK_TLD: "edx" diff --git a/docker/build/notes/Dockerfile b/docker/build/notes/Dockerfile new file mode 100644 index 00000000000..66619362211 --- /dev/null +++ b/docker/build/notes/Dockerfile @@ -0,0 +1,38 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/notes/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +ENV NOTES_VERSION=${OPENEDX_RELEASE} +ENV REPO_OWNER=edx + +ADD . /edx/app/edx_ansible/edx_ansible + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/notes/ansible_overrides.yml / +COPY docker/build/notes/edx_notes_api.yml /edx/etc/edx_notes_api.yml + + +RUN sudo apt-get update && sudo apt-get -y install python3-dev libmysqlclient-dev + +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook notes.yml \ + -c local -i '127.0.0.1,' \ + -t 'install,assets,devstack:install' \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="EDX_NOTES_API_VERSION=$NOTES_VERSION" \ + --extra-vars="COMMON_GIT_PATH=$REPO_OWNER" + +USER root +ENTRYPOINT ["/edx/app/edx_notes_api/devstack.sh"] +CMD ["start"] diff --git a/docker/build/notes/ansible_overrides.yml b/docker/build/notes/ansible_overrides.yml new file mode 100644 index 00000000000..4f0411e71be --- /dev/null +++ b/docker/build/notes/ansible_overrides.yml @@ -0,0 +1,13 @@ +--- +edx_notes_api_gunicorn_host: 0.0.0.0 +EDX_NOTES_API_MYSQL_HOST: 'db' +EDX_NOTES_API_ELASTICSEARCH_URL: 'http://es:9200/' +COMMON_MYSQL_MIGRATE_USER: '{{ EDX_NOTES_API_MYSQL_DB_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ EDX_NOTES_API_MYSQL_DB_PASSWORD }}' + +# For the docker image, force it to use the devstack settings instead of the +# default production settings (notesserver.settings.yaml_config). +# This is also consistent with all other IDAs. 
+EDX_NOTES_API_DJANGO_SETTINGS_MODULE: 'notesserver.settings.devstack' + +devstack: "true" diff --git a/docker/build/notes/edx_notes_api.yml b/docker/build/notes/edx_notes_api.yml new file mode 100644 index 00000000000..9782f5b4c87 --- /dev/null +++ b/docker/build/notes/edx_notes_api.yml @@ -0,0 +1,36 @@ +--- + +ALLOWED_HOSTS: +- localhost +CLIENT_ID: CHANGEME +CLIENT_SECRET: CHANGEME +DATABASES: + default: + ENGINE: django.db.backends.mysql + HOST: db + NAME: edx_notes_api + OPTIONS: + connect_timeout: 10 + PASSWORD: secret + PORT: '3306' + USER: notes001 +DISABLE_TOKEN_CHECK: false +ELASTICSEARCH_INDEX: edx_notes +ELASTICSEARCH_URL: http://es:9200/ +HAYSTACK_CONNECTIONS: + default: + ENGINE: notesserver.highlight.ElasticsearchSearchEngine + INDEX_NAME: edx_notes_api + URL: http://es:9200/ +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: SET-ME-PLEASE + ISSUER: http://127.0.0.1:8000/oauth2 + SECRET_KEY: SET-ME-PLEASE + JWT_PUBLIC_SIGNING_JWK_SET: '' +RESULTS_DEFAULT_SIZE: 25 +RESULTS_MAX_SIZE: 250 +SECRET_KEY: CHANGEME +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME diff --git a/docker/build/notes/inventory b/docker/build/notes/inventory new file mode 100644 index 00000000000..8bb7ba6b33a --- /dev/null +++ b/docker/build/notes/inventory @@ -0,0 +1,2 @@ +[local] +localhost diff --git a/docker/build/rabbitmq/Dockerfile b/docker/build/rabbitmq/Dockerfile new file mode 100644 index 00000000000..68321b75181 --- /dev/null +++ b/docker/build/rabbitmq/Dockerfile @@ -0,0 +1,19 @@ +FROM edxops/xenial-common:latest +LABEL maintainer="edxops" + +ADD . /edx/app/edx_ansible/edx_ansible +COPY docker/build/rabbitmq/ansible_overrides.yml / +COPY docker/build/rabbitmq/run_rabbitmq.sh / +RUN chmod +x /run_rabbitmq.sh + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook rabbitmq.yml \ + -i '127.0.0.1,' -c local \ + -t 'install,manage:app-users' \ + -e@/ansible_overrides.yml + +WORKDIR /edx/app +EXPOSE 15672 5672 +CMD ["/run_rabbitmq.sh"] + diff --git a/docker/build/rabbitmq/ansible_overrides.yml b/docker/build/rabbitmq/ansible_overrides.yml new file mode 100644 index 00000000000..fefa4dc39c6 --- /dev/null +++ b/docker/build/rabbitmq/ansible_overrides.yml @@ -0,0 +1,2 @@ +--- +FLOCK_TLD: "edx" diff --git a/docker/build/rabbitmq/run_rabbitmq.sh b/docker/build/rabbitmq/run_rabbitmq.sh new file mode 100644 index 00000000000..c9b1696c268 --- /dev/null +++ b/docker/build/rabbitmq/run_rabbitmq.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +ulimit -n 1024 +exec rabbitmq-server "$@" \ No newline at end of file diff --git a/docker/build/rabbitmq/sample.json b/docker/build/rabbitmq/sample.json new file mode 100644 index 00000000000..32095a86f3f --- /dev/null +++ b/docker/build/rabbitmq/sample.json @@ -0,0 +1,24 @@ +{ + "test-pull": + { + "AUTH": [ + "lms", + "password" + ], + "CONNECTIONS": 2, + "HANDLERS": [ + { + "CODEJAIL": { + "name": "demo", + "python_bin": "/edx/app/xqwatcher/venvs/demo/bin/python", + "user": "demo" + }, + "HANDLER": "xqueue_watcher.jailedgrader.JailedGrader", + "KWARGS": { + "grader_root": "../data/edx-demo-course/graders/" + } + } + ], + "SERVER": "http://xqueue.edx/" + } +} diff --git a/docker/build/registrar/Dockerfile b/docker/build/registrar/Dockerfile new file mode 100644 index 00000000000..eac42f69f77 --- /dev/null +++ b/docker/build/registrar/Dockerfile @@ -0,0 +1,33 @@ +# To build this Dockerfile: 
+# +# From the root of configuration: +# +# docker build -f docker/build/registrar/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root +ENTRYPOINT ["/edx/app/registrar/devstack.sh"] +CMD ["start"] + +ADD . /edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/registrar/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/registrar/registrar.yml /edx/etc/registrar.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook registrar.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="REGISTRAR_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18734 diff --git a/docker/build/registrar/ansible_overrides.yml b/docker/build/registrar/ansible_overrides.yml new file mode 100644 index 00000000000..649d30fd311 --- /dev/null +++ b/docker/build/registrar/ansible_overrides.yml @@ -0,0 +1,15 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ REGISTRAR_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ REGISTRAR_MYSQL_PASSWORD }}' + +REGISTRAR_MYSQL_HOST: 'edx.devstack.mysql' +REGISTRAR_DJANGO_SETTINGS_MODULE: 'registrar.settings.devstack' +REGISTRAR_GUNICORN_EXTRA: '--reload' +REGISTRAR_MEMCACHE: ['edx.devstack.memcached:11211'] +REGISTRAR_EXTRA_APPS: [] + +REGISTRAR_SECRET_KEY: 'hBiEM5pDr8GsZv1lh6GKmD0c9SF5Z00TFEoRY1zSmCxijFrR' + +edx_django_service_is_devstack: true diff --git a/docker/build/registrar/registrar.yml b/docker/build/registrar/registrar.yml new file mode 100644 index 00000000000..324d1322fb1 --- /dev/null +++ b/docker/build/registrar/registrar.yml @@ -0,0 +1,72 @@ +--- + +API_ROOT: http://localhost:18734/api +BACKEND_SERVICE_EDX_OAUTH2_KEY: registrar-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: registrar-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: registrar + LOCATION: + - edx.devstack.memcached:11211 +CELERY_ALWAYS_EAGER: false +CELERY_BROKER_HOSTNAME: '' +CELERY_BROKER_PASSWORD: '' +CELERY_BROKER_TRANSPORT: '' +CELERY_BROKER_USER: '' +CELERY_BROKER_VHOST: '' +CELERY_DEFAULT_EXCHANGE: registrar +CELERY_DEFAULT_QUEUE: registrar.default +CELERY_DEFAULT_ROUTING_KEY: registrar +CERTIFICATE_LANGUAGES: + en: English + es_419: Spanish +CORS_ORIGIN_WHITELIST: [] +CSRF_COOKIE_SECURE: false +CSRF_TRUSTED_ORIGINS: [] +DATABASES: + default: + ATOMIC_REQUESTS: false + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: registrar + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: registrar001 +DISCOVERY_BASE_URL: null +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +EXTRA_APPS: [] +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en 
+LANGUAGE_COOKIE_NAME: openedx-language-preference +LMS_BASE_URL: null +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/registrar/media + MEDIA_URL: /api/media/ +REGISTRAR_SERVICE_USER: registrar_service_user +SECRET_KEY: hBiEM5pDr8GsZv1lh6GKmD0c9SF5Z00TFEoRY1zSmCxijFrR +SEGMENT_KEY: null +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: registrar-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: registrar-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage +STATIC_ROOT: /edx/var/registrar/staticfiles +TIME_ZONE: UTC diff --git a/docker/build/tableau/Dockerfile b/docker/build/tableau/Dockerfile new file mode 100644 index 00000000000..93b433f108a --- /dev/null +++ b/docker/build/tableau/Dockerfile @@ -0,0 +1,27 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/tableau/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root + +ADD . /edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/edxapp/ansible_overrides.yml / + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook tableau.yml \ + -c local -i '127.0.0.1,' \ + -t 'install,assets,devstack' \ + --extra-vars="@/ansible_overrides.yml" + +EXPOSE 8850 80 diff --git a/docker/build/tableau/ansible_overrides.yml b/docker/build/tableau/ansible_overrides.yml new file mode 100644 index 00000000000..582ed531c9a --- /dev/null +++ b/docker/build/tableau/ansible_overrides.yml @@ -0,0 +1,11 @@ +--- +TABLEAU_ADMIN_USER: tableau +TABLEAU_ADMIN_PASSWORD: 'password' +TABLEAU_SERVER_ADMIN_USER: "tableau-admin" +TABLEAU_SERVER_ADMIN_PASSWORD: "password" + +TABLEAU_REGISTRATION_CONFIG_USER_FIRST_NAME: "Saleem" +TABLEAU_REGISTRATION_CONFIG_USER_LAST_NAME: "Latif" +TABLEAU_REGISTRATION_CONFIG_USER_TITLE: "Software Engineer" +TABLEAU_REGISTRATION_CONFIG_USER_EMAIL: "saleem@edx.org" +TABLEAU_REGISTRATION_CONFIG_USER_PHONE: "" diff --git a/docker/build/tools_jenkins/Dockerfile b/docker/build/tools_jenkins/Dockerfile new file mode 100644 index 00000000000..8b408d99228 --- /dev/null +++ b/docker/build/tools_jenkins/Dockerfile @@ -0,0 +1,12 @@ +FROM edxops/focal-common:latest +LABEL maintainer="edxops" + +USER root +RUN apt-get update + +ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays +COPY docker/build/tools_jenkins/ansible_overrides.yml / +RUN PYTHONUNBUFFERED=1 /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook -v jenkins_tools.yml -i '127.0.0.1,' -c local -e@/ansible_overrides.yml -t 'install' -vv + +CMD /bin/su -l jenkins --shell=/bin/bash -c "/usr/bin/daemon -f --name=jenkins --inherit --env=JENKINS_HOME=/edx/var/jenkins --output=/var/log/jenkins/jenkins.log --pidfile=/var/run/jenkins/jenkins.pid -- /usr/bin/java -jar /usr/share/jenkins/jenkins.war --webroot=/var/cache/jenkins/war --httpPort=8080 --ajp13Port=-1" diff --git a/docker/build/tools_jenkins/ansible_overrides.yml b/docker/build/tools_jenkins/ansible_overrides.yml new file mode 100644 index 00000000000..16240a9e3d2 --- /dev/null +++ b/docker/build/tools_jenkins/ansible_overrides.yml @@ -0,0 +1,4 @@ +--- +DOCKER_TLD: "edx" +jenkins_venv_src_dir: "../../util/jenkins" + diff --git a/docker/build/trusty-common/Dockerfile b/docker/build/trusty-common/Dockerfile new file mode 100644 index 00000000000..dd58c8e3490 --- /dev/null +++ b/docker/build/trusty-common/Dockerfile @@ -0,0 +1,8 @@ +FROM ubuntu:trusty +LABEL maintainer="edxops" +ENV CONFIGURATION_REPO="https://github.com/openedx/configuration.git" +ENV CONFIGURATION_VERSION="master" + +ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh +RUN chmod +x /tmp/ansible-bootstrap.sh +RUN /tmp/ansible-bootstrap.sh diff --git a/docker/build/xenial-common/Dockerfile b/docker/build/xenial-common/Dockerfile new file mode 100644 index 00000000000..a2c6cadc529 --- /dev/null +++ b/docker/build/xenial-common/Dockerfile @@ -0,0 +1,29 @@ +ARG BASE_IMAGE_TAG=latest +FROM ubuntu:xenial +LABEL maintainer="edxops" + +# Set locale to UTF-8 which is not the default for docker. +# See the links for details: +# http://jaredmarkell.com/docker-and-locales/ +# https://github.com/docker-library/python/issues/13 +# https://github.com/docker-library/python/pull/14/files +# Also install software-properties-common to get apt-add-repository +RUN apt-get update &&\ + apt-get install -y locales software-properties-common &&\ + locale-gen en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +ENV CONFIGURATION_REPO="https://github.com/openedx/configuration.git" +ARG OPENEDX_RELEASE=master +ENV CONFIGURATION_VERSION="${OPENEDX_RELEASE}" + +# Add the deadsnakes PPA to install Python 3.8 +RUN apt-add-repository -y ppa:deadsnakes/ppa +RUN apt-get update &&\ + apt-get install -y python3.8-dev python3.8-distutils + +ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh +RUN chmod +x /tmp/ansible-bootstrap.sh +RUN /tmp/ansible-bootstrap.sh diff --git a/docker/build/xqueue/Dockerfile b/docker/build/xqueue/Dockerfile new file mode 100644 index 00000000000..62abdb27a83 --- /dev/null +++ b/docker/build/xqueue/Dockerfile @@ -0,0 +1,31 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/xqueue/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/focal-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +ENTRYPOINT ["/edx/app/xqueue/devstack.sh"] +CMD ["start"] + +USER root +RUN apt-get update +ADD . 
/edx/app/edx_ansible/edx_ansible +COPY docker/build/xqueue/ansible_overrides.yml / +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook xqueue.yml \ + -i '127.0.0.1,' \ + -c local \ + -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code,devstack" \ + --extra-vars="XQUEUE_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" + +EXPOSE 18040 diff --git a/docker/build/xqueue/ansible_overrides.yml b/docker/build/xqueue/ansible_overrides.yml new file mode 100644 index 00000000000..feaadc18e3f --- /dev/null +++ b/docker/build/xqueue/ansible_overrides.yml @@ -0,0 +1,8 @@ +--- +XQUEUE_SYSLOG_SERVER: "localhost" +XQUEUE_RABBITMQ_HOSTNAME: "edx.devstack.rabbit" +XQUEUE_MYSQL_HOST: "edx.devstack.mysql57" +XQUEUE_SETTINGS: "devstack" +xqueue_gunicorn_port: 18040 +xqueue_gunicorn_host: 0.0.0.0 +devstack: true diff --git a/docker/build/xqwatcher/Dockerfile b/docker/build/xqwatcher/Dockerfile new file mode 100644 index 00000000000..8a52ba1553d --- /dev/null +++ b/docker/build/xqwatcher/Dockerfile @@ -0,0 +1,27 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/xqwatcher/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/xenial-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" + +ADD . /edx/app/edx_ansible/edx_ansible +COPY docker/build/xqwatcher/ansible_overrides.yml / +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook xqwatcher.yml \ + -i '127.0.0.1,' -c local \ + -t "install:base,install:configuration,install:system-requirements,install:app-requirements,install:code" \ + --extra-vars="XQWATCHER_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" +WORKDIR /edx/app +ENTRYPOINT ["/edx/app/edxapp/devstack.sh"] +CMD ["start"] diff --git a/docker/build/xqwatcher/ansible_overrides.yml b/docker/build/xqwatcher/ansible_overrides.yml new file mode 100644 index 00000000000..ac9fe7dd2f9 --- /dev/null +++ b/docker/build/xqwatcher/ansible_overrides.yml @@ -0,0 +1,27 @@ +--- +FLOCK_TLD: "edx" + +# Note that this is currently a non-working exemplar configuration, +# there isn't a grader provided in the edx-demo-course yet. 
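Building the watcher image pinned to a named release follows the same pattern as the other Dockerfiles in this change, ahead of the exemplar course map below; a sketch, with an illustrative tag and release ref:

    docker build -f docker/build/xqwatcher/Dockerfile \
        --build-arg OPENEDX_RELEASE=open-release/maple.master \
        -t edxops/xqwatcher:latest .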
+XQWATCHER_COURSES: + - COURSE: "demo" + GIT_REPO: "https://github.com/openedx/edx-demo-course" + GIT_REF: "master" + QUEUE_NAME: "test-pull" + QUEUE_CONFIG: + SERVER: "http://xqueue.{{ FLOCK_TLD }}" + CONNECTIONS: 2 + AUTH: ["lms", "password"] + HANDLERS: + - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader" + CODEJAIL: + name: "demo" + bin_path: "{{ xqwatcher_venv_base }}/demo/bin/python" + user: "demo" + lang: python2 + KWARGS: + grader_root: "../data/edx-demo-course/graders/" + PYTHON_REQUIREMENTS: + - { name: "numpy", version: "1.6.2" } + - { name: "lxml", version: "2.3.6" } + PYTHON_EXECUTABLE: python2 diff --git a/docker/devstack_common_ansible_overrides.yml b/docker/devstack_common_ansible_overrides.yml new file mode 100644 index 00000000000..3dee7815726 --- /dev/null +++ b/docker/devstack_common_ansible_overrides.yml @@ -0,0 +1,11 @@ + +# These variables are loaded into most devstack images via their Dockerfile + +EDXAPP_LMS_BASE: 'edx.devstack.lms:18000' +EDXAPP_LMS_ROOT_URL: 'http://{{ EDXAPP_LMS_BASE }}' +EDXAPP_LMS_PUBLIC_ROOT_URL: 'http://localhost:18000/' + +COMMON_OAUTH_BASE_URL: '{{ EDXAPP_LMS_PUBLIC_ROOT_URL }}' +COMMON_OAUTH_URL_ROOT: '{{ EDXAPP_LMS_ROOT_URL }}/oauth2' +COMMON_JWT_AUDIENCE: 'lms-key' +COMMON_JWT_SECRET_KEY: 'lms-secret' diff --git a/docker/plays/ansible.cfg b/docker/plays/ansible.cfg new file mode 100644 index 00000000000..beaa0927b00 --- /dev/null +++ b/docker/plays/ansible.cfg @@ -0,0 +1,8 @@ +[defaults] + +jinja2_extensions=jinja2.ext.do +roles_path=../plays:../../playbooks/roles +library=../../playbooks/library + +[ssh_connection] +ssh_args=-o ControlMaster=auto -o ControlPersist=60s -o ControlPath="~/.ansible/tmp/ansible-ssh-%h-%p-%r" -o ServerAliveInterval=30 diff --git a/docker/plays/automated.yml b/docker/plays/automated.yml new file mode 100644 index 00000000000..d04a2644d78 --- /dev/null +++ b/docker/plays/automated.yml @@ -0,0 +1,7 @@ +- name: Deploy autom + hosts: all + become: True + gather_facts: True + roles: + - common + - automated diff --git a/docker/plays/credentials.yml b/docker/plays/credentials.yml new file mode 100644 index 00000000000..0845efe5e64 --- /dev/null +++ b/docker/plays/credentials.yml @@ -0,0 +1,9 @@ +- name: Deploy credentials + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - credentials diff --git a/docker/plays/designer.yml b/docker/plays/designer.yml new file mode 100644 index 00000000000..163d7496bb3 --- /dev/null +++ b/docker/plays/designer.yml @@ -0,0 +1,9 @@ +- name: Deploy designer + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - designer diff --git a/docker/plays/docker-tools.yml b/docker/plays/docker-tools.yml new file mode 100644 index 00000000000..0dfc6dd0839 --- /dev/null +++ b/docker/plays/docker-tools.yml @@ -0,0 +1,7 @@ +- name: build a VM with docker-tools + hosts: all + become: True + gather_facts: True + roles: + - common + - docker-tools diff --git a/docker/plays/ecommerce.yml b/docker/plays/ecommerce.yml new file mode 100644 index 00000000000..ef06356805a --- /dev/null +++ b/docker/plays/ecommerce.yml @@ -0,0 +1,11 @@ +- name: Deploy ecommerce + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - ecommerce + - sqlite_fix + - browsers diff --git a/docker/plays/ecomworker.yml b/docker/plays/ecomworker.yml new file mode 100644 index 00000000000..63e996af130 --- /dev/null +++ 
b/docker/plays/ecomworker.yml @@ -0,0 +1,10 @@ +- name: Deploy ecommerce worker + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - ecomworker diff --git a/docker/plays/edxapp.yml b/docker/plays/edxapp.yml new file mode 100644 index 00000000000..a1504d907dc --- /dev/null +++ b/docker/plays/edxapp.yml @@ -0,0 +1,10 @@ +- name: Deploy edxapp + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - edxapp diff --git a/docker/plays/elasticsearch.yml b/docker/plays/elasticsearch.yml new file mode 100644 index 00000000000..3783014eb0a --- /dev/null +++ b/docker/plays/elasticsearch.yml @@ -0,0 +1,6 @@ +- hosts: all + become: True + roles: + - common + - oraclejdk + - elasticsearch diff --git a/docker/plays/enterprise_catalog.yml b/docker/plays/enterprise_catalog.yml new file mode 100644 index 00000000000..499ff24cfb3 --- /dev/null +++ b/docker/plays/enterprise_catalog.yml @@ -0,0 +1,9 @@ +- name: Deploy enterprise catalog + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - enterprise_catalog diff --git a/docker/plays/forum.yml b/docker/plays/forum.yml new file mode 100644 index 00000000000..4cfd9e0c479 --- /dev/null +++ b/docker/plays/forum.yml @@ -0,0 +1,10 @@ +- name: Deploy forum + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - forum diff --git a/docker/plays/jenkins_analytics.yml b/docker/plays/jenkins_analytics.yml new file mode 100644 index 00000000000..153db2575c8 --- /dev/null +++ b/docker/plays/jenkins_analytics.yml @@ -0,0 +1,10 @@ +- name: Deploy the analytics jenkins + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - jenkins_analytics diff --git a/docker/plays/jenkins_tools.yml b/docker/plays/jenkins_tools.yml new file mode 100644 index 00000000000..7cab6ae0c58 --- /dev/null +++ b/docker/plays/jenkins_tools.yml @@ -0,0 +1,10 @@ +- name: Deploy the tools jenkins + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - tools_jenkins diff --git a/docker/plays/library b/docker/plays/library new file mode 120000 index 00000000000..00261b3d076 --- /dev/null +++ b/docker/plays/library @@ -0,0 +1 @@ +../../playbooks/library/ \ No newline at end of file diff --git a/docker/plays/mongo.yml b/docker/plays/mongo.yml new file mode 100644 index 00000000000..25a5268db8a --- /dev/null +++ b/docker/plays/mongo.yml @@ -0,0 +1,7 @@ +- name: Deploy MongoDB 3.2 + hosts: all + become: True + gather_facts: True + roles: + - common + - mongo_3_2 diff --git a/docker/plays/mysql.yml b/docker/plays/mysql.yml new file mode 100644 index 00000000000..ddcdbe720d0 --- /dev/null +++ b/docker/plays/mysql.yml @@ -0,0 +1,7 @@ +- name: Deploy MySQL 5.6 + hosts: all + become: True + gather_facts: True + roles: + - common + - mysql diff --git a/docker/plays/nginx.yml b/docker/plays/nginx.yml new file mode 100644 index 00000000000..96ab6437888 --- /dev/null +++ b/docker/plays/nginx.yml @@ -0,0 +1,20 @@ +- name: Deploy nginx + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - role: nginx + nginx_sites: + - lms + - cms + - xqueue + - certs + - forum + nginx_default_sites: + - lms + nginx_extra_sites: "{{ 
NGINX_EDXAPP_EXTRA_SITES }}" + nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}" diff --git a/docker/plays/notes.yml b/docker/plays/notes.yml new file mode 100644 index 00000000000..94ff04e5899 --- /dev/null +++ b/docker/plays/notes.yml @@ -0,0 +1,9 @@ +- name: Deploy Notes + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: edx_notes_api diff --git a/docker/plays/rabbitmq.yml b/docker/plays/rabbitmq.yml new file mode 100644 index 00000000000..458048651f6 --- /dev/null +++ b/docker/plays/rabbitmq.yml @@ -0,0 +1,10 @@ +- name: Deploy rabbitmq + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - rabbitmq diff --git a/docker/plays/registrar.yml b/docker/plays/registrar.yml new file mode 100644 index 00000000000..894c098bde7 --- /dev/null +++ b/docker/plays/registrar.yml @@ -0,0 +1,9 @@ +- name: Deploy registrar + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - registrar diff --git a/docker/plays/roles b/docker/plays/roles new file mode 120000 index 00000000000..176ed93c3e7 --- /dev/null +++ b/docker/plays/roles @@ -0,0 +1 @@ +../../playbooks/roles/ \ No newline at end of file diff --git a/docker/plays/tableau.yml b/docker/plays/tableau.yml new file mode 100644 index 00000000000..bb828367e03 --- /dev/null +++ b/docker/plays/tableau.yml @@ -0,0 +1,10 @@ +- name: Deploy tableau + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + CLUSTER_NAME: 'tableau' + serial: "{{ serial_count }}" + roles: + - tableau diff --git a/docker/plays/xqueue.yml b/docker/plays/xqueue.yml new file mode 100644 index 00000000000..8003b22f456 --- /dev/null +++ b/docker/plays/xqueue.yml @@ -0,0 +1,7 @@ +- name: Deploy xqueue + hosts: all + become: True + gather_facts: True + roles: + - common + - xqueue diff --git a/docker/plays/xqwatcher.yml b/docker/plays/xqwatcher.yml new file mode 100644 index 00000000000..2e9f73a3039 --- /dev/null +++ b/docker/plays/xqwatcher.yml @@ -0,0 +1,7 @@ +- name: Deploy xqwatcher + hosts: all + become: True + gather_facts: True + roles: + - common + - xqwatcher diff --git a/documentation/0001-ansible-code-conventions.rst b/documentation/0001-ansible-code-conventions.rst new file mode 100644 index 00000000000..3d61c0fc51d --- /dev/null +++ b/documentation/0001-ansible-code-conventions.rst @@ -0,0 +1,143 @@ +======================== +Ansible Code Conventions +======================== + +General Conventions +=================== + +**Spacing** + +* YAML files - All yaml files should use 2 space indents and end with .yml +* Use spaces around jinja variable names. {{ var }} not {{var}} + +**Variables** + +* Variables - Use jinja variable syntax over deprecated variable syntax. {{ var }} not $var +* Variables that are environment specific and that need to be overridden should be in ALL CAPS. +* Variables that are internal to the role should be lowercase. +* Prefix all variables defined in a role with the name of the role. Example: EDXAPP_FOO + +**Roles/Plays/Playbooks** + +* Keep roles self contained - Roles should avoid including tasks from other roles when possible +* Plays should do nothing more than include a list of roles except where pre_tasks and post_tasks are required (to manage a load balancer for example) +* Plays/Playbooks that apply to the general community should be copied to configuration/playbooks +
+**ETC** + +* Handlers - Do not use handlers. If you need to restart an app when specific tasks run, just add a task to do so at the end of the playbook. If necessary, it can be skipped with tags (see `Role Life-cycle Tags`_) +* Separators - Use underscores (e.g. my_role) not dashes (my-role). +* Paths - When defining paths, do not include trailing slashes (e.g. my_path: /foo, not my_path: /foo/). When concatenating paths, follow the same convention (e.g. {{ my_path }}/bar not {{ my_path }}bar) + +.. _Role Life-cycle Tags: https://openedx.atlassian.net/wiki/spaces/OpenOPS/pages/39584735/Role+Life-cycle+Tags + + +Conditionals and Return Status +============================== + +Always use ``when:`` for conditionals + +.. code-block:: yaml + + when: my_var is defined + when: my_var is not defined + +To verify return status (see `ansible docs conditionals`_) + +.. code-block:: yaml + + - command: /bin/false + register: my_result + ignore_errors: True + - debug: msg="task failed" + when: my_result|failed + + +.. _ansible docs conditionals: http://docs.ansible.com/playbooks_conditionals.html + +Formatting +========== + +Use yaml-style blocks. + +Good: + +.. code-block:: yaml + + - file: + dest: "{{ test }}" + src: "./foo.txt" + mode: 0770 + state: present + user: "root" + group: "wheel" + +Bad: + +.. code-block:: yaml + + - file: > + dest={{ test }} src=./foo.txt mode=0770 + state=present user=root group=wheel + +Break long lines using yaml line continuation. `Reference`_ + +.. code-block:: yaml + + - shell: > + python a very long command --with=very --long-options=foo + --and-even=more_options --like-these + + +.. _Reference: http://docs.ansible.com/playbooks_intro.html + +Roles +===== + +**Role Variables** + +- ``common`` role - Contains tasks that apply to all roles. +- ``common_vars`` role - Contains vars that apply to all roles. +- *Role variables* - Variables specific to a role should be defined in /vars/main.yml. All variables should be prefixed with the role name. +- *Role defaults* - Default variables should configure a role to install edx in such a way that all services can run on a single server +- Variables that are environment specific and that need to be overridden should be in all caps. + +Every role should have a standard set of role directories; an example that includes a python and ruby virtualenv: + +.. code-block:: yaml + + edxapp_data_dir: "{{ COMMON_DATA_DIR }}/edxapp" + edxapp_app_dir: "{{ COMMON_APP_DIR }}/edxapp" + edxapp_log_dir: "{{ COMMON_LOG_DIR }}/edxapp" + edxapp_venvs_dir: "{{ edxapp_app_dir }}/venvs" + edxapp_venv_dir: "{{ edxapp_venvs_dir }}/edxapp" + edxapp_venv_bin: "{{ edxapp_venv_dir }}/bin" + edxapp_rbenv_dir: "{{ edxapp_app_dir }}" + edxapp_rbenv_root: "{{ edxapp_rbenv_dir }}/.rbenv" + edxapp_rbenv_shims: "{{ edxapp_rbenv_root }}/shims" + edxapp_rbenv_bin: "{{ edxapp_rbenv_root }}/bin" + edxapp_gem_root: "{{ edxapp_rbenv_dir }}/.gem" + edxapp_gem_bin: "{{ edxapp_gem_root }}/bin" + + +**Role Naming Conventions** + +- *Role names* - Terse, one word if possible, use underscores if necessary. +- *Role task names* - Terse, descriptive, spaces are OK and should be prefixed with the role name. + +Secure vs. Insecure data +======================== + +As a general policy we want to protect the following data: + +- Usernames +- Public keys (keys are OK to be public, but can be used to figure out usernames) +- Hostnames +- Passwords, API keys + +Directory structure for the secure repository: +
+.. code-block:: text + + ansible + ├── files + ├── keys + └── vars + + + +Secure vars are set in files under the ``ansible/vars`` directory. These files will be passed in when the relevant ansible-playbook commands are run. If you need a secure variable defined, give it a name and use it in your playbooks like any other variable. The value should be set in the secure vars files of the relevant deployment (edx, edge, etc.). If you don't have access to this repository, you'll need to submit a ticket to the SRE team to make the secure change. diff --git a/documentation/decisions/0001-mongo-4.2-upgrade.rst b/documentation/decisions/0001-mongo-4.2-upgrade.rst new file mode 100644 index 00000000000..5975c5d8db8 --- /dev/null +++ b/documentation/decisions/0001-mongo-4.2-upgrade.rst @@ -0,0 +1,23 @@ +Mongo 4.2 Upgrade +-------------------------------- +Status +====== +Accepted + +Context +======= + +The Open edX community release Maple will be released on December 9th, 2021. Security support for Mongo 4.0 ends on 30 Apr 2022 (https://endoflife.date/mongodb), so Mongo 4.0 reaches End-of-Life (EOL) before we move to a new Open edX release after Maple. Maple should be running a supported version of Mongo during its lifecycle. + +Decision +======== + +The initial plan was to upgrade Mongo to 4.4. However, we run the MongoDB Community edition in all our environments, and MongoDB Cloud Manager drops support for incremental backup starting with MongoDB 4.2 at FCV 4.2: to run backup and restore for MongoDB 4.2 or later with FCV 4.2 or later, MongoDB must be the Enterprise edition and a MongoDB agent must run on every node of the cluster. Because of these requirements and limitations, we have decided to upgrade MongoDB to 4.2 while keeping FCV at 4.0. + +Requirements +============ + +* To upgrade an existing MongoDB deployment to 4.2, the deployment must be running the Mongo 4.0 series. +* The MongoDB drivers must be compatible with MongoDB 4.2. Check python driver (pymongo) compatibility for edx-platform (https://docs.mongodb.com/drivers/pymongo/#compatibility) and ruby driver (mongoid) compatibility for cs_comment_service (https://docs.mongodb.com/mongoid/master/reference/compatibility/#mongodb-server-compatibility)
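The decision above amounts to running the 4.2 binaries while holding featureCompatibilityVersion (FCV) at 4.0. A sketch of checking and pinning FCV from a shell, with connection and auth flags omitted:

    mongo --quiet --eval 'db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1})'
    mongo --quiet --eval 'db.adminCommand({setFeatureCompatibilityVersion: "4.0"})'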
diff --git a/git-hooks/post-checkout.in b/git-hooks/post-checkout.in deleted file mode 100755 index 6b95cbb57a0..00000000000 --- a/git-hooks/post-checkout.in +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh - -dir=`git rev-parse --show-toplevel` -if [ -z $dir ]; then - exit 1 -fi - -echo -n Setting up hooks from git-hooks.. -$dir/util/sync_hooks.sh >/dev/null -if [ $? -eq 0 ]; then - echo . done. -else - exit 1 -fi diff --git a/git-hooks/pre-commit.in b/git-hooks/pre-commit.in deleted file mode 100755 index 538fafcaebb..00000000000 --- a/git-hooks/pre-commit.in +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh - -dir=`git rev-parse --show-toplevel` -if [ -z $dir ]; then - exit 1 -fi - -echo -n Checking JSON parses.. -$dir/util/json_lint.sh -if [ $? -eq 0 ]; then - echo . it does! -else - exit 1 -fi diff --git a/openedx.yaml b/openedx.yaml new file mode 100644 index 00000000000..f9c5fc26285 --- /dev/null +++ b/openedx.yaml @@ -0,0 +1,9 @@ +# This file describes this Open edX repo, as described in OEP-2: +# http://open-edx-proposals.readthedocs.io/en/latest/oeps/oep-0002.html#specification + +nick: conf +openedx-release: {ref: master} +oeps: + oep-2: true + oep-7: true + oep-18: true diff --git a/playbooks/active_instances_in_asg.py b/playbooks/active_instances_in_asg.py new file mode 100755 index 00000000000..eeadf711bc6 --- /dev/null +++ b/playbooks/active_instances_in_asg.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +""" +Build an ansible inventory list suitable for use by -i by finding the active +Auto Scaling Group in an Elastic Load Balancer. + +If multiple ASGs are active in the ELB, no inventory is returned. + +Assuming a single active ASG is found, a single machine is returned. This inventory +is generally used to target a single machine in a cluster to run a command. + +Typical response: + +10.2.42.79, + +Typical use: + +ansible -i $(active_instances_in_asg.py --asg stage-edx-edxapp) -m shell -a 'management command' + +""" + +import argparse +import botocore.session +import botocore.exceptions +import sys +from collections import defaultdict +from os import environ +from itertools import chain +import random + +class ActiveInventory(): + + profile = None + + def __init__(self, profile, region): + self.profile = profile + self.region = region + + def run(self,asg_name): + session = botocore.session.Session(profile=self.profile) + asg = session.create_client('autoscaling',self.region) + ec2 = session.create_client('ec2',self.region) + + asg_paginator = asg.get_paginator('describe_auto_scaling_groups') + asg_iterator = asg_paginator.paginate() + matching_groups = [] + for groups in asg_iterator: + for asg in groups['AutoScalingGroups']: + asg_inactive = len(asg['SuspendedProcesses']) > 0 + if asg_inactive: + continue + for tag in asg['Tags']: + if tag['Key'] == 'Name' and tag['Value'] == asg_name: + matching_groups.append(asg) + + groups_to_instances = defaultdict(list) + instances_to_groups = {} + + # for all instances in all auto scaling groups + for group in matching_groups: + for instance in group['Instances']: + if instance['LifecycleState'] == 'InService': + groups_to_instances[group['AutoScalingGroupName']].append(instance['InstanceId']) + instances_to_groups[instance['InstanceId']] = group['AutoScalingGroupName'] + + + # We only need to check for ASGs in an ELB if we have more than 1. + # If a cluster is running with an ASG out of the ELB, then there are larger problems.
+ active_groups = defaultdict(dict) + if len(matching_groups) > 1: + elb = session.create_client('elb',self.region) + for group in matching_groups: + for load_balancer_name in group['LoadBalancerNames']: + instances = elb.describe_instance_health(LoadBalancerName=load_balancer_name) + active_instances = [instance['InstanceId'] for instance in instances['InstanceStates'] if instance['State'] == 'InService'] + for instance_id in active_instances: + active_groups[instances_to_groups[instance_id]] = 1 + + # If we found no active groups, because there are no ELBs (edxapp workers normally) + elbs = list(chain.from_iterable([group['LoadBalancerNames'] for group in matching_groups])) + if not (active_groups or elbs): + # This implies we're in a worker cluster since we have no ELB and we didn't find an active group above + for group in matching_groups: + # Asgard marks a deleting ASG with SuspendedProcesses + # If the ASG doesn't have those, then it's "Active" and a worker since there was no ELB above + if not {'Launch','AddToLoadBalancer'} <= {i['ProcessName'] for i in group['SuspendedProcesses']}: + active_groups[group['AutoScalingGroupName']] = 1 + + if len(active_groups) > 1: + # When we have more than a single active ASG, we need to bail out as we don't know what ASG to pick an instance from + print("Multiple active ASGs - unable to choose an instance", file=sys.stderr) + return + else: + active_groups = { g['AutoScalingGroupName']: 1 for g in matching_groups } + + + for group in active_groups.keys(): + for group_instance in groups_to_instances[group]: + instance = random.choice(ec2.describe_instances(InstanceIds=[group_instance])['Reservations'][0]['Instances']) + if 'PrivateIpAddress' in instance: + print("{},".format(instance['PrivateIpAddress'])) + return # We only want a single IP + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.') + parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True) + parser.add_argument('--asg',help='Name of the ASG we want active instances from.', required=True) + args = parser.parse_args() + + region = environ.get('AWS_REGION','us-east-1') + + ActiveInventory(args.profile,region).run(args.asg) diff --git a/playbooks/add-ubuntu-key.yml b/playbooks/add-ubuntu-key.yml new file mode 100644 index 00000000000..5ed1b15cca1 --- /dev/null +++ b/playbooks/add-ubuntu-key.yml @@ -0,0 +1,39 @@ +# A simple utility play to add a public key to the authorized key +# file for the ubuntu user. +# You must pass in the entire line that you are adding.
+# Example: ansible-playbook add-ubuntu-key.yml -c local -i 127.0.0.1, \ +# -e "public_key=deployment-201407" \ +# -e owner=jarv -e keyfile=/home/jarv/.ssh/authorized_keys + +- hosts: all + vars: + # Number of instances to operate on at a time + serial_count: 1 + owner: ubuntu + keyfile: "/home/{{ owner }}/.ssh/authorized_keys" + serial: "{{ serial_count }}" + tasks: + - fail: + msg: "You must pass in a public_key" + when: public_key is not defined + - fail: + msg: "public_key does not exist in secrets" + when: ubuntu_public_keys[public_key] is not defined + - command: mktemp + register: mktemp + - name: Validate the public key before we add it to authorized_keys + copy: + content: "{{ ubuntu_public_keys[public_key] }}" + dest: "{{ mktemp.stdout }}" + # This tests the public key and will not continue if it does not look valid + - command: ssh-keygen -l -f {{ mktemp.stdout }} + - file: + path: "{{ mktemp.stdout }}" + state: absent + - lineinfile: + dest: "{{ keyfile }}" + line: "{{ ubuntu_public_keys[public_key] }}" + - file: + path: "{{ keyfile }}" + owner: "{{ owner }}" + mode: 0600 diff --git a/playbooks/aide.yml b/playbooks/aide.yml new file mode 100644 index 00000000000..36f5ab46fa3 --- /dev/null +++ b/playbooks/aide.yml @@ -0,0 +1,17 @@ +- name: Deploy aide IDS + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - aide + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/alton.yml b/playbooks/alton.yml new file mode 100644 index 00000000000..9865706a34c --- /dev/null +++ b/playbooks/alton.yml @@ -0,0 +1,13 @@ +# Configure an admin instance with jenkins and asgard. +# Usage: ansible-playbook alton.yml -i , -e /admin/edx_admin.yml -e /admin/admin.yml +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - alton diff --git a/playbooks/analytics-jenkins.yml b/playbooks/analytics-jenkins.yml new file mode 100644 index 00000000000..ea2171bd246 --- /dev/null +++ b/playbooks/analytics-jenkins.yml @@ -0,0 +1,13 @@ +# Usage: ansible-playbook -i jenkins-host, -e@path/to/jenkins-extra-vars.yml -e@path/to/secure.yml -e 'COMMON_ENABLE_NEWRELIC=false' -e 'COMMON_ENABLE_DATADOG=false' + +- name: Configure instance(s) + hosts: all + become: True + roles: + - role: jenkins_analytics + ansible_ssh_user: ubuntu + - role: nginx + nginx_sites: + - jenkins + nginx_default_sites: + - jenkins diff --git a/playbooks/analytics_single.yml b/playbooks/analytics_single.yml new file mode 100644 index 00000000000..fbe2bbbda3e --- /dev/null +++ b/playbooks/analytics_single.yml @@ -0,0 +1,49 @@ +--- + +# Open edX Native installation for single server analytics installs.
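+# +# A typical invocation might look like the following (the extra-vars file and +# its path are illustrative): +# +# ansible-playbook -c local -i 'localhost,' analytics_single.yml -e@/path/to/extra-vars.yml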
+ +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Deploy all analytics services to a single node + hosts: all + become: True + gather_facts: True + vars: + migrate_db: "yes" + disable_edx_services: false + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + SANDBOX_ENABLE_ANALYTICS_API: True + SANDBOX_ENABLE_ANALYTICS_PIPELINE: True + SANDBOX_ENABLE_INSIGHTS: True + EDXAPP_MYSQL_HOST: '' + EDXAPP_MEMCACHE: '' + POSTFIX_QUEUE_EXTERNAL_SMTP_HOST: '' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: edxlocal + when: EDXAPP_MYSQL_HOST == 'localhost' + - role: memcache + when: "'localhost' in ' '.join(EDXAPP_MEMCACHE)" + - role: nginx + nginx_sites: + - analytics_api + when: SANDBOX_ENABLE_ANALYTICS_API + - role: analytics_api + when: SANDBOX_ENABLE_ANALYTICS_API + - role: analytics_pipeline + when: SANDBOX_ENABLE_ANALYTICS_PIPELINE + - role: nginx + nginx_sites: + - insights + when: SANDBOX_ENABLE_INSIGHTS + - role: insights + when: SANDBOX_ENABLE_INSIGHTS + - role: postfix_queue + when: POSTFIX_QUEUE_EXTERNAL_SMTP_HOST != '' diff --git a/playbooks/analyticsapi.yml b/playbooks/analyticsapi.yml new file mode 100644 index 00000000000..5551301b698 --- /dev/null +++ b/playbooks/analyticsapi.yml @@ -0,0 +1,26 @@ +- name: Deploy Analytics API + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'analytics-api' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - analytics_api + - analytics_api + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'analytics_api' + when: ANALYTICS_API_HERMES_ENABLED diff --git a/playbooks/ansible.cfg b/playbooks/ansible.cfg index 973d462fb54..c6e92ac8695 100644 --- a/playbooks/ansible.cfg +++ b/playbooks/ansible.cfg @@ -6,4 +6,9 @@ jinja2_extensions=jinja2.ext.do host_key_checking = False -roles_path=../../../ansible-roles +roles_path=../../ansible-roles/roles:../../ansible-private/roles:../../ansible-roles/ +retries=5 +ansible_managed=This file is created and updated by ansible, edit at your peril + +[ssh_connection] +ssh_args=-o ControlMaster=auto -o ControlPersist=60s -o ControlPath="~/.ansible/tmp/ansible-ssh-%h-%p-%r" -o ServerAliveInterval=30 diff --git a/playbooks/antivirus.yml b/playbooks/antivirus.yml new file mode 100644 index 00000000000..dc7c9335607 --- /dev/null +++ b/playbooks/antivirus.yml @@ -0,0 +1,15 @@ +- name: Deploy Antivirus Scanner + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - antivirus + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/apply-security.yml b/playbooks/apply-security.yml new file mode 100644 index 00000000000..cb55cec019a --- /dev/null +++ b/playbooks/apply-security.yml @@ -0,0 +1,17 @@ +# Applies the security role to a set of inventories. Ensures that security updates +# are taken on these machines. +# +# This is useful if you have a set of deployed instances that are not periodically +# upgraded via replacement (such as your persistence layers). 
+# +# ansible-playbook -v -i ec2.py, apply-security.yml --limit tag_Name_Snowflake +# ansible-playbook -v -i hostname, apply-security.yml + +- name: Apply security updates + hosts: all + gather_facts: True + become: True + vars: + SECURITY_UPGRADE_ON_ANSIBLE: true + roles: + - security diff --git a/playbooks/authn_frontend.yml b/playbooks/authn_frontend.yml new file mode 100644 index 00000000000..9d80a8e9589 --- /dev/null +++ b/playbooks/authn_frontend.yml @@ -0,0 +1,15 @@ +- name: Deploy Authn MFE Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'authn' + AUTHN_ENABLED: True + AUTHN_SANDBOX_BUILD: False + roles: + - role: authn + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/automated.yml b/playbooks/automated.yml new file mode 100644 index 00000000000..2612c25fa9a --- /dev/null +++ b/playbooks/automated.yml @@ -0,0 +1,14 @@ +- name: Deploy automated role + hosts: all + become: True + gather_facts: True + roles: +# - aws + - automated + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/aws.yml b/playbooks/aws.yml new file mode 100644 index 00000000000..6cfd5ce7f39 --- /dev/null +++ b/playbooks/aws.yml @@ -0,0 +1,11 @@ +- name: Deploy aws + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - role: aws + when: COMMON_ENABLE_AWS_ROLE diff --git a/playbooks/blockstore.yml b/playbooks/blockstore.yml new file mode 100644 index 00000000000..eb33aab349b --- /dev/null +++ b/playbooks/blockstore.yml @@ -0,0 +1,21 @@ +- name: Deploy blockstore + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'blockstore' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - blockstore + - blockstore + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'blockstore' + when: BLOCKSTORE_HERMES_ENABLED diff --git a/playbooks/bootstrap_python.yml b/playbooks/bootstrap_python.yml new file mode 100644 index 00000000000..8450633da2f --- /dev/null +++ b/playbooks/bootstrap_python.yml @@ -0,0 +1,15 @@ +--- +# Runs the python bootstrapping role against an ubuntu machine +# This is not as complete as ansible_bootstrap.sh (intentionally so) +# This lets you get python2.7 installed on a machine so you can follow up +# with your actual playbook or role. The key is gather_facts: False. +# +# Usage: +# ansible-playbook ./bootstrap_python.yml -i "hostname," +# +- hosts: all + become: True + gather_facts: True + roles: + - role: python + when: ansible_distribution_release != 'focal' diff --git a/playbooks/callback_plugins/sqs.py b/playbooks/callback_plugins/sqs.py index f653ee049a6..d9e7d2038d2 100644 --- a/playbooks/callback_plugins/sqs.py +++ b/playbooks/callback_plugins/sqs.py @@ -15,20 +15,24 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
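+# The plugin is configured entirely through environment variables; a hypothetical +# invocation could look like this (the queue name and prefix are illustrative): +# +# ANSIBLE_ENABLE_SQS=true SQS_REGION=us-east-1 SQS_NAME=ansible-events \ +# SQS_MSG_PREFIX=deploy-123 ansible-playbook playbook.yml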
+# From https://github.com/ansible/ansible/issues/31527#issuecomment-335495855 import os import sys import time import json +import socket try: + import boto +except ImportError: + boto = None +else: import boto.sqs from boto.exception import NoAuthHandlerFound -except ImportError: - print "Boto is required for the sqs_notify callback plugin" - raise +from ansible.plugins.callback import CallbackBase -class CallbackModule(object): +class CallbackModule(CallbackBase): """ This Ansible callback plugin sends task events to SQS. @@ -46,36 +50,42 @@ class CallbackModule(object): - START events """ def __init__(self): + self.enable_sqs = 'ANSIBLE_ENABLE_SQS' in os.environ + if not self.enable_sqs: + return + + # make sure we got our imports + if not boto: + raise ImportError( + "The sqs callback module requires the boto Python module, " + "which is not installed or was not found." + ) self.start_time = time.time() - if 'ANSIBLE_ENABLE_SQS' in os.environ: - self.enable_sqs = True - if not 'SQS_REGION' in os.environ: - print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \ - 'not defined in environment' - sys.exit(1) - self.region = os.environ['SQS_REGION'] - try: - self.sqs = boto.sqs.connect_to_region(self.region) - except NoAuthHandlerFound: - print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \ - 'to AWS due invalid credentials' - sys.exit(1) - if not 'SQS_NAME' in os.environ: - print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \ - 'defined in environment' - sys.exit(1) - self.name = os.environ['SQS_NAME'] - self.queue = self.sqs.create_queue(self.name) - if 'SQS_MSG_PREFIX' in os.environ: - self.prefix = os.environ['SQS_MSG_PREFIX'] - else: - self.prefix = '' - - self.last_seen_ts = {} + if not 'SQS_REGION' in os.environ: + print('ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \ + 'not defined in environment') + sys.exit(1) + self.region = os.environ['SQS_REGION'] + try: + self.sqs = boto.sqs.connect_to_region(self.region) + except NoAuthHandlerFound: + print('ANSIBLE_ENABLE_SQS enabled but cannot connect ' \ + 'to AWS due to invalid credentials') + sys.exit(1) + if not 'SQS_NAME' in os.environ: + print('ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \ + 'defined in environment') + sys.exit(1) + self.name = os.environ['SQS_NAME'] + self.queue = self.sqs.create_queue(self.name) + if 'SQS_MSG_PREFIX' in os.environ: + self.prefix = os.environ['SQS_MSG_PREFIX'] else: - self.enable_sqs = False + self.prefix = '' + + self.last_seen_ts = {} def runner_on_failed(self, host, res, ignore_errors=False): if self.enable_sqs: @@ -85,7 +95,7 @@ def runner_on_failed(self, host, res, ignore_errors=False): def runner_on_ok(self, host, res): if self.enable_sqs: # don't send the setup results - if res['invocation']['module_name'] != "setup": + if 'invocation' in res and 'module_name' in res['invocation'] and res['invocation']['module_name'] != "setup": self._send_queue_message(res, 'OK') def playbook_on_task_start(self, name, is_conditional): @@ -125,8 +135,22 @@ def _send_queue_message(self, msg, msg_type): if output in payload[msg_type]: # only keep the last 1000 characters # of stderr and stdout - if len(payload[msg_type][output]) > 1000: + # Some modules set the value of stdout or stderr to booleans in + # which case the len will fail. Check to see if there is content + # before trying to clip it. + if payload[msg_type][output] and len(payload[msg_type][output]) > 1000: payload[msg_type][output] = "(clipping) ...
" \ + payload[msg_type][output][-1000:] - - self.sqs.send_message(self.queue, json.dumps(payload)) + if 'stdout_lines' in payload[msg_type]: + # only keep the last 20 or so lines to avoid payload size errors + if len(payload[msg_type]['stdout_lines']) > 20: + payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:] + while True: + try: + self.sqs.send_message(self.queue, json.dumps(payload)) + break + except socket.gaierror as e: + print('socket.gaierror will retry: ' + str(e)) + time.sleep(1) + except Exception as e: + raise e diff --git a/playbooks/cluster_rabbitmq.yml b/playbooks/cluster_rabbitmq.yml new file mode 100644 index 00000000000..ea5e5ce6eb4 --- /dev/null +++ b/playbooks/cluster_rabbitmq.yml @@ -0,0 +1,49 @@ + +# ansible-playbook -i ec2.py cluster_rabbitmq.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts + +- hosts: all + become: True + serial: 1 + vars: + # By default take instances in and out of the elb(s) they + # are attached to + # To skip elb operations use "-e elb_pre_post=false" + elb_pre_post: true + # Number of instances to operate on at a time + serial_count: 1 + serial: "{{ serial_count }}" + pre_tasks: + - action: ec2_metadata_facts + - debug: + var: "{{ ansible_ec2_instance_id }}" + when: elb_pre_post + - name: Instance De-register + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + region: us-east-1 + state: absent + wait_timeout: 60 + become: False + when: elb_pre_post + tasks: + - debug: + var: "{{ ansible_ec2_local_ipv4 }}" + with_items: "{{ list.results }}" + - command: rabbitmqctl stop_app + - command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }} + when: hostvars.keys()[0] != ansible_ec2_local_ipv4 + - command: rabbitmqctl start_app + post_tasks: + - debug: var="{{ ansible_ec2_instance_id }}" + when: elb_pre_post + - name: Register instance in the elb + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ ec2_elbs }}" + region: us-east-1 + state: present + wait_timeout: 60 + become: False + when: elb_pre_post diff --git a/playbooks/commerce_coordinator.yml b/playbooks/commerce_coordinator.yml new file mode 100644 index 00000000000..5968f528ff8 --- /dev/null +++ b/playbooks/commerce_coordinator.yml @@ -0,0 +1,22 @@ +- name: Deploy edX Commerce Coordinator + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'commerce_coordinator' + REGISTRAR_ENABLED: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - commerce_coordinator + - commerce_coordinator + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'commerce_coordinator' + when: COMMERCE_COORDINATOR__HERMES_ENABLED diff --git a/playbooks/common.yml b/playbooks/common.yml new file mode 100644 index 00000000000..b17e46e6324 --- /dev/null +++ b/playbooks/common.yml @@ -0,0 +1,16 @@ +- name: Deploy common + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/common_edx_base.yml b/playbooks/common_edx_base.yml new file mode 100644 index
00000000000..7ffeb4ef580 --- /dev/null +++ b/playbooks/common_edx_base.yml @@ -0,0 +1,22 @@ +# This is a playbook that edX uses to ensure that common configuration is +# deployed, security updates are applied, user accounts are updated on the +# machines and our monitoring software is updated or installed. +# +- name: Deploy common base edX configuration + hosts: all + become: True + gather_facts: True + vars: + SECURITY_UNATTENDED_UPGRADES: true + COMMON_SECURITY_UPDATES: true + roles: + - common + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/commoncluster.yml b/playbooks/commoncluster.yml new file mode 100644 index 00000000000..28a2ede2dc9 --- /dev/null +++ b/playbooks/commoncluster.yml @@ -0,0 +1,71 @@ +# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts + +- hosts: all + become: True + serial: 1 + vars: + # By default take instances in and out of the elb(s) they + # are attached to + # To skip elb operations use "-e elb_pre_post=false" + elb_pre_post: true + # Number of instances to operate on at a time + serial_count: 1 + serial: "{{ serial_count }}" + pre_tasks: + - action: ec2_metadata_facts + when: elb_pre_post + - debug: var="{{ ansible_ec2_instance_id }}" + when: elb_pre_post + - name: Instance De-register + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + region: us-east-1 + state: absent + wait_timeout: 60 + become: False + when: elb_pre_post + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: nginx + nginx_sites: + - xqueue + - xqueue + - oraclejdk + - elasticsearch + - rabbitmq + post_tasks: + - debug: var="{{ ansible_ec2_instance_id }}" + when: elb_pre_post + - name: Register instance in the elb + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ ec2_elbs }}" + region: us-east-1 + state: present + wait_timeout: 60 + become: False + when: elb_pre_post +# +# In order to reconfigure the host resolution we are issuing a +# reboot.
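+# One way to implement the polling mentioned in the TODO below would be +# Ansible's wait_for_connection module after the shutdown command (a sketch, +# untested; the delay and timeout values are illustrative): +# +# - name: wait for the host to come back +# wait_for_connection: +# delay: 30 +# timeout: 300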
+# TODO: We should probably poll to ensure the host comes back before moving +# to the next host so that we don't reboot all of the servers simultaneously +- hosts: all + become: True + serial: 1 + vars: + reboot: False + tasks: + - name: reboot + command: /sbin/shutdown -r now "Reboot is triggered by Ansible" + when: reboot + tags: reboot diff --git a/playbooks/conductor.yml b/playbooks/conductor.yml new file mode 100644 index 00000000000..36bdc85f8b2 --- /dev/null +++ b/playbooks/conductor.yml @@ -0,0 +1,27 @@ +- name: Deploy conductor (router for learner portal) + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'conductor' + NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: True + NGINX_MAP_HASH_MAX_SIZE: 4096 + NGINX_MAP_HASH_BUCKET_SIZE: 128 + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_app_dir: "/etc/nginx" + nginx_sites: + - conductor + nginx_default_sites: + - conductor + CONDUCTOR_NGINX_PORT: 8000 + - role: conductor + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog + when: COMMON_ENABLE_DATADOG diff --git a/playbooks/connect_sandbox.yml b/playbooks/connect_sandbox.yml new file mode 100644 index 00000000000..f33fdc89383 --- /dev/null +++ b/playbooks/connect_sandbox.yml @@ -0,0 +1,39 @@ +- name: connect a sandbox to production data + hosts: all + gather_facts: False + become: True + tasks: + + # WARNING - calling lineinfile on a symlink + # will convert the symlink to a file! + # don't use /edx/etc/server-vars.yml here + # + # What we are doing here is updating the sandbox + # server-vars config file so that when update + # is called it will use the new MYSQL connection + # info. 
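+ # For example, after this play runs, server-vars.yml will contain lines like + # the following (values are purely illustrative): + # + # EDXAPP_MYSQL_HOST: sandbox-clone.abc123.us-east-1.rds.amazonaws.com + # EDXAPP_MYSQL_DB_NAME: edxapp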
+ - name: Update RDS to point to the sandbox clone + lineinfile: + dest: /edx/app/edx_ansible/server-vars.yml + line: "{{ item }}" + with_items: + - "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}" + - "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}" + - "EDXAPP_MYSQL_USER: {{ EDXAPP_MYSQL_USER }}" + - "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}" + tags: update_edxapp_mysql_host + + - name: Update mongo to point to the sandbox mongo clone + lineinfile: + dest: /edx/app/edx_ansible/server-vars.yml + line: "{{ item }}" + with_items: + - "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}" + - "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}" + - "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}" + - "EDXAPP_MONGO_PASSWORD: {{ EDXAPP_MONGO_PASSWORD }}" + tags: update_edxapp_mysql_host + + - name: call update on edx-platform + shell: "/edx/bin/update edx-platform {{ edxapp_version }}" + tags: update_edxapp_mysql_host diff --git a/playbooks/continuous_delivery/ansible.cfg b/playbooks/continuous_delivery/ansible.cfg new file mode 100644 index 00000000000..9431752b3b4 --- /dev/null +++ b/playbooks/continuous_delivery/ansible.cfg @@ -0,0 +1,15 @@ +# config file for ansible -- http://ansible.github.com +# nearly all parameters can be overridden in ansible-playbook or with command line flags +# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first + +[defaults] + +jinja2_extensions=jinja2.ext.do +host_key_checking=False +roles_path=../../../ansible-roles/roles:../../../ansible-private/roles:../../../ansible-roles/:../../playbooks/roles +library=../library/ +ansible_managed=This file is created and updated by ansible, edit at your peril + +[ssh_connection] +ssh_args=-o ControlMaster=auto -o ControlPersist=60s -o ControlPath="~/.ansible/tmp/ansible-ssh-%h-%p-%r" -o ServerAliveInterval=30 +retries=5 diff --git a/playbooks/continuous_delivery/cleanup.yml b/playbooks/continuous_delivery/cleanup.yml new file mode 100644 index 00000000000..bbe51dc4bff --- /dev/null +++ b/playbooks/continuous_delivery/cleanup.yml @@ -0,0 +1,45 @@ +# This playbook will clean up the work done in the create_ami and launch_instance playbooks by doing the following: +# - delete the key used to bring up the instance +# - terminate the instance used to create the AMI +# - delete the ansible-runtime/ artifact directory +# +# Required variables for this playbook: +# +# - instance_id - the ec2 instance ID used to create the AMI +# - keypair_id - the keypair used to launch the instance.
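+# +# Both are normally read from the launch_info.yml artifact written by +# launch_instance.yml, e.g. (illustrative values): +# +# instance_id: i-0123456789abcdef0 +# keypair_id: d057a5d9-4fc5-4a21-9646-4c135be0b7c8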
+# +# Other variables +# - ec2_region - The region used to create the AMI +# +# Example command line to run this playbook: +# ansible-playbook -vvvv -i "localhost," -c local \ +# -e @overrides.yml \ +# -e @/tmp/ansible-runtime/d057a5d9-4fc5-4a21-9646-4c135be0b7c8/launch_info.yml \ +# cleanup.yml +# + +- hosts: all + vars: + ec2_region: us-east-1 + ec2_timeout: 300 + artifact_path: /tmp/ansible-runtime + gather_facts: False + connection: local + tasks: + + - name: Delete keypair + ec2_key: + state: absent + region: "{{ ec2_region }}" + name: "{{ keypair_id }}" + + - name: Terminate Instance + ec2: + region: "{{ ec2_region }}" + state: absent + instance_ids: "{{ instance_id }}" + + - name: cleanup local file system + file: + path: "{{ artifact_path }}" + state: absent diff --git a/playbooks/continuous_delivery/create_ami.yml b/playbooks/continuous_delivery/create_ami.yml new file mode 100644 index 00000000000..db5e2e0ec9f --- /dev/null +++ b/playbooks/continuous_delivery/create_ami.yml @@ -0,0 +1,126 @@ +# This playbook will create an AMI from an EC2 instance launched with the +# continuous_delivery/launch_instance playbook. +# +# Required variables for this playbook: +# +# - instance_id - the ec2 instance ID used to create the AMI +# - edx_environment - value to use for the environment tag +# - deployment - value to use for the deploy tag +# - app_repo - the url of the github repo for this app +# - app_version - git hash of the app (play, service, IDA) being deployed +# - play - the play that was run +# - cache_id - the cache_id version +# +# Other variables +# - ec2_region - The region used to create the AMI +# - ami_creation_timeout - how long to wait before giving up on AMI creation in seconds +# - ami_wait - (yes/no) should ansible pause while the AMI is created +# - no_reboot - (yes/no) should the instance not be rebooted during AMI creation +# - artifact_path - the path to where this ansible run stores the artifacts for the pipeline +# - extra_name_identifier - Makes each AMI unique if desired - Default: 0 +# - version_tags - A mapping of {app: [repo, version], ...}, used to generate +# a "version:app = repo version" tag on the AMI +# +# Example command line to run this playbook: +# ansible-playbook -vvvv -i "localhost," -c local \ +# -e @overrides.yml \ +# -e @/tmp/ansible-runtime/d057a5d9-4fc5-4a21-9646-4c135be0b7c8/launch_info.yml \ +# -e play=pipeline-test \ +# -e deployment=edx \ +# -e edx_environment=sandbox \ +# -e app_version=12345 \ +# -e cache_id=12345 \ +# create_ami.yml +# + +- hosts: all + vars: + ec2_region: us-east-1 + ami_wait: yes + stop_wait: yes + ami_creation_timeout: 5400 + no_reboot: no + artifact_path: /tmp/ansible-runtime + extra_name_identifier: 0 + gather_facts: False + connection: local + tasks: + + - name: Fetch tags on the instance + ec2_tag: + region: "{{ ec2_region }}" + resource: "{{ instance_id }}" + state: list + register: instance_tags + retries: 3 + until: instance_tags is succeeded + + - name: Stop instance + ec2: + instance_ids: "{{ instance_id }}" + state: stopped + wait: "{{ stop_wait }}" + region: "{{ ec2_region }}" + + - name: Create AMI + ec2_ami: + instance_id: "{{ instance_id }}" + name: "{{ edx_environment }} -- {{ deployment }} -- {{ play }} -- {{ extra_name_identifier }} -- {{ app_version[:7] }}" + region: "{{ ec2_region }}" + wait: "{{ ami_wait }}" + wait_timeout: "{{ ami_creation_timeout }}" + no_reboot: "{{ no_reboot }}" + description: "AMI built via edX continuous delivery pipeline - Ansible version: {{ ansible_version }}" + # used a JSON object here as
there is a string interpolation in the keys. + tags: "{ + 'version:{{ play }}-from-pipeline':'{{ app_repo }} {{ app_version }}', + 'play':'{{ play }}', + 'cache_id':'{{ cache_id }}', + 'environment':'{{ edx_environment }}', + 'deployment':'{{ deployment }}' + }" + register: ami_register + + - name: Allow AMI to be launched by another account + ec2_ami: + image_id: "{{ ami_register.image_id }}" + state: present + launch_permissions: + user_ids: "{{ allowed_accounts }}" + when: allowed_accounts is defined + + - name: Add any tags that are on the instance to the AMI + ec2_tag: + region: "{{ ec2_region }}" + resource: "{{ ami_register.image_id }}" + tags: "{{ instance_tags.tags }}" + + - name: Add any version tags that were passed on the commandline + ec2_tag: + region: "{{ ec2_region }}" + resource: "{{ ami_register.image_id }}" + tags: "{ + {% for name, (repo, version) in version_tags.items() %} + 'version:{{ name }}': '{{ repo }} {{ version }}', + {% endfor %} + }" + when: version_tags is defined + + - name: Fetch tags on the AMI + ec2_tag: + region: "{{ ec2_region }}" + resource: "{{ ami_register.image_id }}" + state: list + register: ami_tags + + - name: Ensure artifact directory exists + file: + path: "{{ artifact_path }}" + state: directory + force: yes + + - name: Generate artifact containing the ami_id + template: + src: templates/local/ami_template.yml.j2 + dest: "{{ artifact_path }}/ami.yml" + mode: 0600 diff --git a/playbooks/continuous_delivery/launch_instance.yml b/playbooks/continuous_delivery/launch_instance.yml new file mode 100644 index 00000000000..6352868e361 --- /dev/null +++ b/playbooks/continuous_delivery/launch_instance.yml @@ -0,0 +1,139 @@ +# This playbook will launch an ec2 instance in a VPC. +# This instance will have an autogenerated key. +# +# required variables for this playbook: +# - base_ami_id - The base AMI-ID +# - ec2_vpc_subnet_id - The Subnet ID to bring up the instance +# - ec2_security_group_id - The security group ID to use +# - ec2_instance_profile_name - The instance profile that should be used to launch this AMI +# +# Other Variables: +# - ec2_region - The region the server should be brought up in +# - ec2_instance_type - The instance type to use +# - ebs_volume_size - Size in GB for the root volume +# - ec2_timeout - Time in seconds to wait for an ec2 instance to become available +# - ec2_assign_public_ip - (yes/no) should the instance have a public IP address?
+# - ami_id - overrides 'base_ami_id' (used when passing in the ami.yml file produced by +# the create_ami.yml ansible script) +# +# This playbook generates a directory with 3 artifact files: +# - launch_info.yml - A yaml file with information such as the instance ID and internal IP address of the instance launched +# - key.pem - The private key file for the newly generated keypair +# - ansible_inventory - An inventory file containing the launched instance's IP address +# +# Example command line to run this playbook: +# ansible-playbook -i "localhost," -c local -e @overrides.yml launch_instance.yml +# + +- hosts: all + vars: + artifact_path: /tmp/ansible-runtime + ec2_region: us-east-1 + ec2_instance_type: t2.medium + ebs_volume_size: 8 + ec2_timeout: 500 + ec2_assign_public_ip: no + automation_prefix: "gocd automation run -- {{ ansible_date_time.iso8601 }} -- " + gather_facts: True + connection: local + tasks: + + - name: Generate UUID for keypair + command: cat /proc/sys/kernel/random/uuid + register: unique_key_name + + - name: Generate ec2 keypair to use for this instance + ec2_key: + name: "{{ automation_prefix }} {{ unique_key_name.stdout }}" + region: "{{ ec2_region }}" + register: ssh_key_register + no_log: True + + - name: Ensure artifact directory exists + file: + path: "{{ artifact_path }}" + state: directory + force: yes + + - name: Use ami_id if available + set_fact: + launch_ami_id: "{{ ami_id }}" + when: ami_id is defined + + - name: Use base_ami_id if ami_id is not available + set_fact: + launch_ami_id: "{{ base_ami_id }}" + when: ami_id is not defined + + - name: Launch EC2 instance + ec2: + region: "{{ ec2_region }}" + key_name: "{{ automation_prefix }} {{ unique_key_name.stdout }}" + instance_type: "{{ ec2_instance_type }}" + image: "{{ launch_ami_id }}" + group_id: "{{ ec2_security_group_id }}" + count: 1 + vpc_subnet_id: "{{ ec2_vpc_subnet_id }}" + assign_public_ip: "{{ ec2_assign_public_ip }}" + volumes: + - device_name: /dev/sda1 + volume_type: 'gp2' + volume_size: "{{ ebs_volume_size }}" + wait: yes + wait_timeout: "{{ ec2_timeout }}" + instance_profile_name: "{{ ec2_instance_profile_name }}" + register: ec2_instance_register + + - name: Tag EC2 instance + ec2_tag: + region: "{{ ec2_region }}" + resource: "{{ ec2_instance_register.instances[0].id }}" + state: present + tags: + Name: "{{ automation_prefix }} {{ unique_key_name.stdout }}" + gocd_pipeline_url: "{{ lookup('ansible.builtin.env', 'GO_SERVER_URL')|default('', True) }}/pipelines/value_stream_map/{{ lookup('ansible.builtin.env', 'GO_PIPELINE_NAME')|default('', True) }}/{{ lookup('ansible.builtin.env', 'GO_PIPELINE_COUNTER')|default('', True) }}" + gocd_pipeline_name: "{{ lookup('ansible.builtin.env', 'GO_PIPELINE_NAME')|default('', True) }}" + gocd_pipeline_label: "{{ lookup('ansible.builtin.env', 'GO_PIPELINE_LABEL')|default('', True) }}" + gocd_pipeline_counter: "{{ lookup('ansible.builtin.env', 'GO_PIPELINE_COUNTER')|default('', True) }}" + gocd_stage_counter: "{{ lookup('ansible.builtin.env', 'GO_STAGE_COUNTER')|default('', True) }}" + register: tag_ec2_instance + retries: 6 + delay: 15 + until: tag_ec2_instance is succeeded + + - name: Wait for SSH to come up (private ip) + wait_for: + host: "{{ ec2_instance_register.instances[0].private_ip }}" + port: 22 + delay: 60 + timeout: "{{ ec2_timeout }}" + state: started + when: not ec2_assign_public_ip + + - name: Wait for SSH to come up (public ip) + wait_for: + host: "{{ ec2_instance_register.instances[0].public_ip }}" + port: 22 + delay: 60 + timeout: "{{ ec2_timeout }}" + state: started + when: ec2_assign_public_ip + + - name:
Generate artifact for jobs downstream + template: + src: templates/local/launch_template.yml.j2 + dest: "{{ artifact_path }}/launch_info.yml" + mode: 0600 + + - name: Generate key material artifact for jobs downstream + template: + src: templates/local/key.pem.j2 + dest: "{{ artifact_path }}/key.pem" + mode: 0600 + + - name: Generate ansible inventory file + template: + src: templates/local/inventory.j2 + dest: "{{ artifact_path }}/ansible_inventory" + mode: 0600 + + diff --git a/playbooks/continuous_delivery/prospectus_download_redirects.yml b/playbooks/continuous_delivery/prospectus_download_redirects.yml new file mode 100644 index 00000000000..34d03109641 --- /dev/null +++ b/playbooks/continuous_delivery/prospectus_download_redirects.yml @@ -0,0 +1,26 @@ +- name: Download edX Prospectus Service nginx redirects file + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'prospectus' + PROSPECTUS_DATA_DIR: "/edx/var/prospectus" + NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: True + NGINX_MAP_HASH_MAX_SIZE: 4096 + NGINX_MAP_HASH_BUCKET_SIZE: 128 + PROSPECTUS_ENABLED: True + PROSPECTUS_SANDBOX_BUILD: FALSE + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + tasks: + - name: Create redirects config directory + file: + path: "{{ prospectus_redirect_file | dirname }}" + state: directory + - name: Upload prospectus redirects from GoCD + copy: + src: "{{ artifact_path }}/prospectus-redirects.conf" + dest: "{{ prospectus_redirect_file }}" diff --git a/playbooks/edx-east/roles b/playbooks/continuous_delivery/roles similarity index 100% rename from playbooks/edx-east/roles rename to playbooks/continuous_delivery/roles diff --git a/playbooks/continuous_delivery/rollback_migrations.yml b/playbooks/continuous_delivery/rollback_migrations.yml new file mode 100644 index 00000000000..d63d74e6a21 --- /dev/null +++ b/playbooks/continuous_delivery/rollback_migrations.yml @@ -0,0 +1,128 @@ +# This playbook will check for migrations to roll back for Django applications within a larger +# Django project. The provided input file will determine what migrations should be rolled back. +# +# The playbook uses the Django management commands found in this Django app repo: +# https://github.com/openedx/edx-django-release-util +# So the Django app above needs to be installed in the Django project. +# +# Required variables for this playbook: +# +# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath +# this directory in a directory with the same name as APPLICATION_NAME. +# NOTE: It is assumed that edx-django-release-util is one of its INSTALLED_APPS. +# - APPLICATION_NAME - The name of the application that we are migrating.
+# - APPLICATION_USER - user which is meant to run the application +# - ARTIFACT_PATH - the path where the migration artifacts should be copied after completion +# - initial_states - An array of the migration states to roll back to; each entry supplies an app name and a migration name +# - database - Name of the database to run the rollback against +# +# Other variables: +# - migration_result - the filename where the migration output is saved +# - SUB_APPLICATION_NAME - used for migrations in edxapp {lms|cms}, must be specified +# when APPLICATION_NAME is edxapp +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production +# +# Example command line to run this playbook: +# ansible-playbook \ +# -vvvv \ +# -i ../{artifact_path}/ansible_inventory \ +# --private-key=$PRIVATE_KEY \ +# --module-path=playbooks/library \ +# --user=ubuntu \ +# -e APPLICATION_PATH=$APPLICATION_PATH \ +# -e APPLICATION_NAME=$APPLICATION_NAME \ +# -e APPLICATION_USER=$APPLICATION_USER \ +# -e ARTIFACT_PATH=`/bin/pwd`/../{artifact_path}/migrations \ +# -e DB_MIGRATION_USER=$DB_MIGRATION_USER \ +# -e DB_MIGRATION_PASS=$DB_MIGRATION_PASS \ +# -e @../{artifact_path}/migration_input_file.yml \ +# -e SUB_APPLICATION_NAME={sub_application_name} \ +# playbooks/continuous_delivery/rollback_migrations.yml + +- hosts: all + gather_facts: false + become: true + + vars: + COMMAND_PREFIX: ". {{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; DB_MIGRATION_USER={{ DB_MIGRATION_USER }} DB_MIGRATION_PASS='{{ DB_MIGRATION_PASS }}' {{ APPLICATION_PATH }}/venvs/{{ APPLICATION_NAME }}/bin/python /edx/bin/manage.{{ APPLICATION_NAME }} " + EDX_PLATFORM_SETTINGS: "production" + rollback_result: rollback_result.yml + original_state: original_state.yml + migration_plan: migration_plan.yml + migration_result: migration_result.yml + database: default + + vars_files: + - roles/edxapp/defaults/main.yml + + tasks: + - name: Create a temporary directory for the migration output.
+ command: mktemp -d + become_user: "{{ APPLICATION_USER }}" + register: temp_output_dir + + - name: generate current migration state + shell: > + {{ COMMAND_PREFIX }} show_unapplied_migrations + --output_file '{{ temp_output_dir.stdout }}/{{ original_state }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: generate current migration state for edxapp + shell: > + {{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations + --database '{{ database }}' + --output_file '{{ temp_output_dir.stdout }}/{{ database }}_{{ original_state }}' + --settings '{{ EDX_PLATFORM_SETTINGS }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" + + - name: migrate to original versions + shell: > + {{ COMMAND_PREFIX }} run_specific_migrations + --migration '{{ item.app }}' '{{ item.migration }}' + --output_file '{{ temp_output_dir.stdout }}/{{ migration_plan }}' + become_user: "{{ APPLICATION_USER }}" + with_items: "{{ initial_states }}" + when: APPLICATION_NAME != "edxapp" + + - name: migrate to original versions for edxapp + shell: > + {{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} run_specific_migrations + --migration '{{ item.app }}' '{{ item.migration }}' + --output_file '{{ temp_output_dir.stdout }}/{{ database }}_{{ migration_plan }}' + --database '{{ database }}' + --settings '{{ EDX_PLATFORM_SETTINGS }}' + become_user: "{{ APPLICATION_USER }}" + with_items: "{{ initial_states }}" + when: APPLICATION_NAME == "edxapp" + + - name: generate post rollback migration state + shell: > + {{ COMMAND_PREFIX }} show_unapplied_migrations + --output_file '{{ temp_output_dir.stdout }}/{{ migration_result }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: generate post migration state for edxapp + shell: > + {{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations + --database '{{ database }}' + --output_file '{{ temp_output_dir.stdout }}/{{ database }}_{{ migration_result }}' + --settings '{{ EDX_PLATFORM_SETTINGS }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" + + - name: List all migration files + action: "command ls -1 {{ temp_output_dir.stdout }}" + register: migration_files + + - name: Transfer artifacts to the proper place. + fetch: + src: "{{ temp_output_dir.stdout }}/{{ item }}" + dest: "{{ ARTIFACT_PATH }}/" + flat: True + fail_on_missing: True + mode: 0700 + with_items: + - "{{ migration_files.stdout_lines }}" diff --git a/playbooks/continuous_delivery/run_management_command.yml b/playbooks/continuous_delivery/run_management_command.yml new file mode 100644 index 00000000000..3d69f06a365 --- /dev/null +++ b/playbooks/continuous_delivery/run_management_command.yml @@ -0,0 +1,39 @@ +# This playbook will run a management command for a Django application. +# +# +# Required variables for this playbook: +# +# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath +# this directory in a directory with the same name as APPLICATION_NAME. 
+# - APPLICATION_NAME - The name of the application that we are running against +# - APPLICATION_USER - user which is meant to run the application +# - COMMAND - name of the management command to be run +# +# Other variables: +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production +# +# Example command line to run this playbook: +# ansible-playbook -vvvv -i "localhost," -c local \ +# -e @overrides.yml \ +# run_management_command.yml +# + + + +- hosts: all + vars: + EDX_PLATFORM_SETTINGS: "production" + COMMAND_PREFIX: " . {{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; {{ APPLICATION_PATH }}/venvs/{{ APPLICATION_NAME }}/bin/python /edx/bin/manage.{{ APPLICATION_NAME }}" + gather_facts: False + become: True + tasks: + + - name: run management command + shell: '{{ COMMAND_PREFIX }} {{ COMMAND }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: run edxapp management command + shell: '{{ COMMAND_PREFIX }} {{ COMMAND }} --settings "{{ EDX_PLATFORM_SETTINGS }}"' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" diff --git a/playbooks/continuous_delivery/run_migrations.yml b/playbooks/continuous_delivery/run_migrations.yml new file mode 100644 index 00000000000..a6c73846579 --- /dev/null +++ b/playbooks/continuous_delivery/run_migrations.yml @@ -0,0 +1,85 @@ +# This playbook will check for migrations that need to be run for Django applications within a larger +# Django project. If migrations exist, it will run the migrations while saving the output as an artifact. +# +# The playbook uses the Django management commands found in this Django app repo: +# https://github.com/openedx/edx-django-release-util +# So the Django app above needs to be installed in the Django app being checked for migrations. +# +# Required variables for this playbook: +# +# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath +# this directory in a directory with the same name as APPLICATION_NAME. +# NOTE: It is assumed that edx-django-release-util is one of its INSTALLED_APPS. +# - APPLICATION_NAME - The name of the application that we are migrating. +# - APPLICATION_USER - user which is meant to run the application +# - ARTIFACT_PATH - the path where the migration artifacts should be copied after completion +# - DB_MIGRATION_USER - the database username +# - DB_MIGRATION_PASS - the database password +# +# Other variables: +# - migration_plan - the filename where the unapplied migration YAML output is stored +# - migration_result - the filename where the migration output is saved +# - SUB_APPLICATION_NAME - used for migrations in edxapp {lms|cms}, must be specified +# when APPLICATION_NAME is edxapp +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production +# +# Example command line to run this playbook: +# ansible-playbook -vvvv -i "localhost," -c local \ +# -e @overrides.yml \ +# run_migrations.yml +# + + + +- hosts: all + vars: + migration_plan: migration_plan.yml + migration_result: migration_result.yml + EDX_PLATFORM_SETTINGS: "production" + COMMAND_PREFIX: " .
{{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; DB_MIGRATION_USER={{ DB_MIGRATION_USER }} DB_MIGRATION_PASS={{ DB_MIGRATION_PASS }} {{ APPLICATION_PATH }}/venvs/{{ APPLICATION_NAME }}/bin/python /edx/bin/manage.{{ APPLICATION_NAME }}" + vars_files: + - roles/edxapp/defaults/main.yml + gather_facts: False + become: True + tasks: + + - name: Create a temporary directory for the migration output. + command: mktemp -d + become_user: "{{ APPLICATION_USER }}" + register: temp_output_dir + + - name: generate list of unapplied migrations + shell: '{{ COMMAND_PREFIX }} show_unapplied_migrations --output_file "{{ temp_output_dir.stdout }}/{{ migration_plan }}"' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: generate list of edxapp unapplied migrations + shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations --database "{{ item }}" --output_file "{{ temp_output_dir.stdout }}/{{ item }}_{{ migration_plan }}" --settings "{{ EDX_PLATFORM_SETTINGS }}"' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" and item != "read_replica" + with_items: "{{ edxapp_databases.keys()|list }}" + + - name: migrate to apply any unapplied migrations + shell: '{{ COMMAND_PREFIX }} run_migrations --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: migrate to apply any edxapp unapplied migrations + shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} run_migrations --database "{{ item }}" --settings "{{ EDX_PLATFORM_SETTINGS }}" --output_file "{{ temp_output_dir.stdout }}/{{ item }}_{{ migration_result }}"' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" and item != "read_replica" + with_items: "{{ edxapp_databases.keys()|list }}" + + - name: List all migration files + action: "command ls -1 {{ temp_output_dir.stdout }}" + register: migration_files + + - name: Transfer artifacts to the proper place. 
+ fetch: + src: "{{ temp_output_dir.stdout }}/{{ item }}" + dest: "{{ ARTIFACT_PATH }}/" + flat: True + fail_on_missing: True + mode: 0700 + with_items: + - "{{ migration_files.stdout_lines }}" diff --git a/playbooks/continuous_delivery/templates/local/ami_template.yml.j2 b/playbooks/continuous_delivery/templates/local/ami_template.yml.j2 new file mode 100644 index 00000000000..67aea65d026 --- /dev/null +++ b/playbooks/continuous_delivery/templates/local/ami_template.yml.j2 @@ -0,0 +1,4 @@ +ami_id: {{ ami_register.image_id }} +ami_message: {{ ami_register.msg }} +ami_state: {{ ami_register.state }} +{{ ami_tags.tags | to_nice_yaml }} \ No newline at end of file diff --git a/playbooks/continuous_delivery/templates/local/inventory.j2 b/playbooks/continuous_delivery/templates/local/inventory.j2 new file mode 100644 index 00000000000..a96ba9c5b48 --- /dev/null +++ b/playbooks/continuous_delivery/templates/local/inventory.j2 @@ -0,0 +1,7 @@ +{% for instance in ec2_instance_register.instances %} +{% if ec2_assign_public_ip %} +{{ instance.public_ip }} +{% else %} +{{ instance.private_ip }} +{% endif %} +{% endfor %} diff --git a/playbooks/continuous_delivery/templates/local/key.pem.j2 b/playbooks/continuous_delivery/templates/local/key.pem.j2 new file mode 100644 index 00000000000..55a49146b68 --- /dev/null +++ b/playbooks/continuous_delivery/templates/local/key.pem.j2 @@ -0,0 +1 @@ +{{ ssh_key_register.key.private_key }} \ No newline at end of file diff --git a/playbooks/continuous_delivery/templates/local/launch_template.yml.j2 b/playbooks/continuous_delivery/templates/local/launch_template.yml.j2 new file mode 100644 index 00000000000..f9f954caea6 --- /dev/null +++ b/playbooks/continuous_delivery/templates/local/launch_template.yml.j2 @@ -0,0 +1,4 @@ +keypair_id: {{ unique_key_name.stdout }} +key_material_file: {{ artifact_path }}/key.pem +instance_id: {{ ec2_instance_register.instances[0].id }} +instance_ip: {{ ec2_instance_register.instances[0].public_ip }} diff --git a/playbooks/continuous_delivery/upload_assets.yml b/playbooks/continuous_delivery/upload_assets.yml new file mode 100644 index 00000000000..8cbd951ca0d --- /dev/null +++ b/playbooks/continuous_delivery/upload_assets.yml @@ -0,0 +1,46 @@ +# This playbook will upload assets from a django service to an S3 bucket +# +# +# Required variables for this playbook: +# +# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath +# this directory in a directory with the same name as APPLICATION_NAME. +# - APPLICATION_NAME - The name of the application that we are running against +# - APPLICATION_USER - user which is meant to run the application +# - BUCKET_PATH - name of the bucket to upload assets to +# +# Other variables: +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production +# +# Example command line to run this playbook: +# ansible-playbook -vvvv -i "localhost," -c local \ +# -e @overrides.yml \ +# upload_assets.yml +# + + + +- hosts: all + vars: + EDX_PLATFORM_SETTINGS: "production" + # Both LMS and Studio gather their assets to the same directory, + # so most of the time leaving the default sub-application will be fine. + SUB_APPLICATION_NAME: "lms" + COMMAND_PREFIX: " .
{{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; {{ APPLICATION_PATH }}/venvs/{{ APPLICATION_NAME }}/bin/python /edx/bin/manage.{{ APPLICATION_NAME }}" + STATIC_ROOT: >- + $({{ COMMAND_PREFIX }} shell --command "from django.conf import settings; print(getattr(settings, 'STATIC_ROOT', ''))") + STATIC_ROOT_EDXAPP: >- + $({{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} shell --settings "{{ EDX_PLATFORM_SETTINGS }}" --command "from django.conf import settings; print(getattr(settings, 'STATIC_ROOT', ''))") + gather_facts: False + become: True + tasks: + + - name: sync assets to s3 + shell: 'aws s3 sync {{ STATIC_ROOT }} {{ BUCKET_PATH }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: sync edxapp assets to s3 + shell: 'aws s3 sync {{ STATIC_ROOT_EDXAPP }} {{ BUCKET_PATH }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" diff --git a/playbooks/course_authoring.yml b/playbooks/course_authoring.yml new file mode 100644 index 00000000000..8e6fae9fb04 --- /dev/null +++ b/playbooks/course_authoring.yml @@ -0,0 +1,16 @@ +- name: Deploy the Course Authoring MFE + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'course-authoring' + COURSE_AUTHORING_ENABLED: True + COURSE_AUTHORING_SANDBOX_BUILD: False + roles: + - role: course_authoring + MFE_NAME: course-authoring + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE \ No newline at end of file diff --git a/playbooks/create_all_user_types.yml b/playbooks/create_all_user_types.yml new file mode 100644 index 00000000000..6fc4ef89308 --- /dev/null +++ b/playbooks/create_all_user_types.yml @@ -0,0 +1,32 @@ +# This is a test play that creates all supported user +# types using the user role. Example only, not meant +# to be run on a real system +- name: Create all user types (test play) + hosts: all + become: True + gather_facts: False + vars_files: + - 'roles/edxapp/defaults/main.yml' + - 'roles/common_vars/defaults/main.yml' + - 'roles/analytics-server/defaults/main.yml' + - 'roles/analytics/defaults/main.yml' + pre_tasks: + - fail: msg="You must pass a user into this play" + when: user is not defined + - name: give access with no sudo + set_fact: + CUSTOM_USER_INFO: + - name: "{{ user }}" + github: true + - name: test-admin-user + type: admin + - name: test-normal-user + - name: test-restricted-user-edxapp + type: restricted + sudoers_template: 99-edxapp-manage-cmds.j2 + - name: test-restricted-user-analytics + type: restricted + sudoers_template: 99-analytics-manage-cmds.j2 + roles: + - role: user + user_info: "{{ CUSTOM_USER_INFO }}" diff --git a/playbooks/edx-east/create_cname.yml b/playbooks/create_cname.yml similarity index 100% rename from playbooks/edx-east/create_cname.yml rename to playbooks/create_cname.yml diff --git a/playbooks/create_db_and_users.yml b/playbooks/create_db_and_users.yml new file mode 100644 index 00000000000..58001c9a052 --- /dev/null +++ b/playbooks/create_db_and_users.yml @@ -0,0 +1,117 @@ +# +# This play will create databases and users for an application.
+# It can be run like so: +# +# ansible-playbook -c local -i 'localhost,' create_db_and_users.yml -e@./db.yml +# +# If running ansible from a python virtualenv you will need a command like the following +# +# ansible-playbook -c local -i 'localhost,' create_db_and_users.yml -e@./db.yml -e "ansible_python_interpreter=$(which python)" +# +# if you get an SSL error connecting to MySQL, make sure you have the following in ~/.my.cnf +# +# [client] +# ssl_mode=DISABLED +# +# the content of db.yml contains the following dictionaries +# +# database_connection: &default_connection +# login_host: "mysql.example.org" +# login_user: "root" +# login_password: "super-secure-password" + +# DEFAULT_ENCODING: "utf8" + +# databases: +# reports: +# state: "present" +# encoding: "{{ DEFAULT_ENCODING }}" +# <<: *default_connection +# application: +# state: "present" +# encoding: "{{ DEFAULT_ENCODING }}" +# <<: *default_connection + +# database_users: +# migrate: +# state: "present" +# password: "user-with-ddl-privs" +# host: "%" +# privileges: +# - "reports.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX" +# - "wwc.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX" +# <<: *default_connection +# runtime: +# state: "present" +# password: "user-with-dml-privs" +# host: "%" +# privileges: +# - "reports.*:SELECT" +# - "wwc.*:SELECT,INSERT,UPDATE,DELETE" +# <<: *default_connection + +- name: Create databases and users + hosts: all + gather_facts: False + tasks: + # Install required library, currently this needs to be available + # to system python. + - name: install python mysqldb module + pip: name={{item}} state=present + with_items: + - mysqlclient + + - name: create mysql databases + mysql_db: + db: "{{ item.name}}" + state: "{{ item.state }}" + encoding: "{{ item.encoding }}" + login_host: "{{ item.login_host }}" + login_user: "{{ item.login_user }}" + login_password: "{{ item.login_password }}" + with_items: "{{ databases }}" + tags: + - dbs + + - name: create mysql users and assign privileges + mysql_user: + name: "{{ item.name }}" + state: "{{ item.state | default('present') }}" + priv: "{{ '/'.join(item.privileges) }}" + password: "{{ item.password }}" + host: "{{ item.host }}" + login_host: "{{ item.login_host }}" + login_user: "{{ item.login_user }}" + login_password: "{{ item.login_password }}" + append_privs: yes + when: item.mysql_plugin is not defined + with_items: "{{ database_users }}" + tags: + - users + + # If the plugin is AWSAuthenticationPlugin, we can't use the mysql_user module + # to create the user for AWS RDS IAM authentication, as the module does not support a plugin parameter + + - name: create mysql users for AWS RDS IAM authentication + shell: | + mysql -u"{{ item.login_user }}" -p"{{ item.login_password }}" -h"{{ item.login_host }}" -e "SET @sql := CASE WHEN (SELECT count(*) FROM mysql.user WHERE User='{{ item.name }}') = 0 THEN 'CREATE USER {{ item.name }} IDENTIFIED WITH AWSAuthenticationPlugin as \'RDS\'' ELSE 'Select 0' END;PREPARE stmt FROM @sql;EXECUTE stmt;DEALLOCATE PREPARE stmt" + when: item.mysql_plugin is defined and item.state == 'present' and item.mysql_plugin == 'AWSAuthenticationPlugin' + with_items: "{{ database_users }}" + tags: + - users + + - name: assign privileges to AWS RDS IAM users + shell: | + mysql -u"{{ item.login_user }}" -p"{{ item.login_password }}" -h"{{ item.login_host }}" -e "GRANT {{ item.privileges }} to '{{ item.name }}'@'{{ item.host }}'" + when: item.mysql_plugin is defined and item.state == 'present' and item.mysql_plugin ==
'AWSAuthenticationPlugin' + with_items: "{{ database_users }}" + tags: + - users + + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/mysql_rds_set_configuration.html + - name: Set binlog retention length + shell: | + mysql -u"{{ database_connection.login_user }}" -p"{{ database_connection.login_password }}" -h"{{ database_connection.login_host }}" -e "call mysql.rds_set_configuration('binlog retention hours', {{RDS_BINLOG_RETENTION_HOURS | default(168)}});" + when: RDS_BINLOG_RETENTION_HOURS is defined + tags: + - users diff --git a/playbooks/create_django_ida.yml b/playbooks/create_django_ida.yml new file mode 100644 index 00000000000..be7430ca59d --- /dev/null +++ b/playbooks/create_django_ida.yml @@ -0,0 +1,9 @@ +--- +# Creates a new ansible role +# Usage: +# ansible-playbook ./create_django_ida.yml -i "localhost," -c local -e my_role_name=my_awesome_role +# +- hosts: localhost + gather_facts: False + roles: + - ansible-role-django-ida diff --git a/playbooks/create_mongo_users.yml b/playbooks/create_mongo_users.yml new file mode 100644 index 00000000000..e03c674cb27 --- /dev/null +++ b/playbooks/create_mongo_users.yml @@ -0,0 +1,56 @@ +# +# This play expects PyMongo to be installed locally. You need to provide +# the hostname or IP address of one of the mongo hosts, +# +# ansible-playbook -i localhost, create_mongo_users.yml -e@edx.yml -e@stage-edx.yml -e@db/edxapp-mongo.yml +# +# edxapp-mongo.yml should define MONGO_USERS with the following format +# +# MONGO_USERS: +# - user: edxapp001 +# password: secret +# database: edxapp +# roles: readWrite +# +# define optional MONGO_ROLES in edxapp-mongo.yml with the following format to create and use custom mongodb role +# +# MONGO_ROLES: +# - database: edxapp +# role: edxapp_readWrite +# privileges: "'createCollection', 'createIndex', 'dropIndex', 'find', 'insert', 'listIndexes', 'listCollections', 'remove', 'renameCollectionSameDB', 'update'" +# +# It should also define a login_host and repl_set. You can set login_host to +# be any member of your cluster as this code will find and connect to the +# primary. 
+#
+# It should also define a login_host and repl_set. You can set login_host to
+# be any member of your cluster, as this play will find and connect to the
+# primary:
+#
+# login_host: 10.17.90.123
+# repl_set: prod-edx-edxapp
+
+- name: Create mongo users
+  hosts: all
+  gather_facts: False
+  connection: local
+  # This allows you to use your virtualenv's pymongo instead of installing it globally
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  tasks:
+    - name: install python mongo module
+      pip: name=pymongo state=present
+
+    - name: create a mongodb role
+      shell: mongo -u {{ MONGO_ADMIN_USER }} -p {{ MONGO_ADMIN_PASSWORD }} --host {{ repl_set }}/{{ login_host }} --authenticationDatabase admin --eval "db = db.getSiblingDB('{{ item.database }}');if ( db.getRole('{{ item.role }}') === null ){ db.createRole({ role{{':'}} '{{ item.role }}', privileges{{':'}} [{ resource{{':'}} { db{{':'}} '{{ item.database }}', collection{{':'}} '' }, actions{{':'}} [{{ item.privileges }}]}], roles{{':'}} []}); } else { db.updateRole('{{ item.role }}',{ privileges{{':'}} [{ resource{{':'}} { db{{':'}} '{{ item.database }}', collection{{':'}} '' }, actions{{':'}} [{{ item.privileges }}]}], roles{{':'}} []}); }"
+      with_items: "{{ MONGO_ROLES }}"
+      when: MONGO_ROLES is defined
+
+    - name: create a mongodb user
+      mongodb_user:
+        database: "{{ item.database }}"
+        login_user: "{{ MONGO_ADMIN_USER }}"
+        login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+        login_host: "{{ login_host }}"
+        name: "{{ item.user }}"
+        password: "{{ item.password }}"
+        roles: "{{ item.roles }}"
+        state: present
+        replica_set: "{{ repl_set }}"
+      with_items: "{{ MONGO_USERS }}"
diff --git a/playbooks/create_pgsql_db_and_users.yml b/playbooks/create_pgsql_db_and_users.yml
new file mode 100644
index 00000000000..52a45628fbd
--- /dev/null
+++ b/playbooks/create_pgsql_db_and_users.yml
@@ -0,0 +1,80 @@
+#
+# This play will create databases and users for an application.
+# It can be run like so:
+#
+# ansible-playbook -c local -i 'localhost,' create_pgsql_db_and_users.yml -e@./db.yml
+#
+# If running ansible from a python virtualenv, you will need a command like the following:
+#
+# ansible-playbook -c local -i 'localhost,' create_pgsql_db_and_users.yml -e@./db.yml -e "ansible_python_interpreter=$(which python)"
+#
+# db.yml should contain the following dictionaries:
+
+# database_connection: &default_connection
+#   login_host: "pgsql.example.org"
+#   login_user: "root"
+#   login_password: "super-secure-password"
+
+# databases:
+#   - name: "example"
+#     state: "present"
+#     <<: *default_connection
+
+# database_users:
+#   - name: "example"
+#     db: "example"
+#     state: "present"
+#     password: "user-password"
+#     privs: "ALL"
+#     db_objects: "ALL_DEFAULT"
+#     db_object_type: "default_privs"
+#     <<: *default_connection
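+
+# For this example, the three tasks below amount to roughly the following
+# SQL (a sketch -- the modules drive psycopg2 rather than literal psql,
+# and "ALL_DEFAULT" covers tables, sequences, functions, and types):
+#
+#   CREATE DATABASE example;
+#   CREATE USER example WITH PASSWORD 'user-password';
+#   ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO example;
+#   -- ...and likewise for SEQUENCES, FUNCTIONS, and TYPES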
+
+- name: Create databases and users
+  hosts: all
+  gather_facts: False
+  tasks:
+    # Install the required library; currently this needs to be
+    # available to the system python.
+    - name: install PostgreSQL client
+      pip: name={{ item }} state=present
+      with_items:
+        - psycopg2-binary
+
+    - name: create PostgreSQL databases
+      postgresql_db:
+        db: "{{ item.name }}"
+        state: "{{ item.state }}"
+        login_host: "{{ item.login_host }}"
+        login_user: "{{ item.login_user }}"
+        login_password: "{{ item.login_password }}"
+      with_items: "{{ databases }}"
+      tags:
+        - dbs
+
+    - name: Create PostgreSQL users
+      postgresql_user:
+        db: "{{ item.db }}"
+        name: "{{ item.name }}"
+        state: "{{ item.state | default('present') }}"
+        password: "{{ item.password }}"
+        login_host: "{{ item.login_host }}"
+        login_user: "{{ item.login_user }}"
+        login_password: "{{ item.login_password }}"
+      with_items: "{{ database_users }}"
+      tags:
+        - users
+
+    - name: Assign privileges
+      postgresql_privs:
+        roles: "{{ item.name }}"
+        db: "{{ item.db }}"
+        privs: "{{ item.privs }}"
+        objs: "{{ item.db_objects }}"
+        type: "{{ item.db_object_type }}"
+        login_host: "{{ item.login_host }}"
+        login_user: "{{ item.login_user }}"
+        login_password: "{{ item.login_password }}"
+      with_items: "{{ database_users }}"
+      tags:
+        - privileges
diff --git a/playbooks/create_rds.yml b/playbooks/create_rds.yml
new file mode 100644
index 00000000000..69507e32c6b
--- /dev/null
+++ b/playbooks/create_rds.yml
@@ -0,0 +1,87 @@
+# This play will create an RDS instance for an application.
+# It can be run like so:
+#
+# ansible-playbook -c local -i 'localhost,' create_rds.yml -e@./db.yml
+#
+# where db.yml contains the following settings.
+#
+# It can read from the same config as create_db_and_users.yml and needs this
+# part of that config:
+# database_connection:
+#   login_host: "{{ database_name }}......rds.amazonaws.com" # This is pretty predictable for our accounts.
+#   login_user: "root"
+#   login_password: "" # max 41 characters; only printable ASCII characters besides '/', '@', '\"', ' ' may be used.
+# database_name: your-database-name
+# database_size: number of gigabytes (integer)
+# instance_type: an AWS RDS instance type such as "db.t2.medium"
+# aws_region: a full region (such as us-east-1 or us-west-2), not an AZ
+# database_engine_version: use either our standard version or the newest possible, such as "5.6.39"
+# maintenance_window: UTC time and day of week to allow maintenance "Mon:16:00-Mon:16:30"
+# vpc_security_groups: the security group in the VPC your RDS should belong to (this is separate from your app or elb SG)
+# subnet_group: the name of a group in the RDS console that contains subnets; it will pick the appropriate one
+# parameter_group: name of the parameter group with overridden defaults for this RDS
+# backup_window: UTC time of the day to take a backup "08:00-08:30"
+# backup_retention: days to keep backups (integer)
+# multi_zone: yes or no (whether this RDS is multi-az)
+# performance_insights: yes or no (or unset) whether to enable Performance Insights (must be 5.6.40 or greater and not a t2)
+# tags: "[{'Key': 'environment', 'Value': 'TBD'}, {'Key': 'deployment', 'Value': 'TBD'}, {'Key': 'deployment', 'Value': 'TBD'}]"
+
+
+- name: Create databases and users
+  hosts: all
+  gather_facts: False
+  tasks:
+
+    # The rds module for ansible only uses boto2, and boto2 defaults to
+    # magnetic disks; it will use io1 if you specify PIOPS, but you can't
+    # get gp2.
+    # Adapted from https://github.com/ansible/ansible-modules-core/issues/633
+    # which points you to the various other open github issues.
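+    # For contrast, a sketch of what the boto2-backed rds module allows --
+    # storage type is implied by iops: omit it for magnetic, set it for io1,
+    # with no way to ask for gp2 (the values here are illustrative only):
+    #
+    #   - rds:
+    #       command: create
+    #       instance_name: "{{ database_name }}"
+    #       region: "{{ aws_region }}"
+    #       db_engine: MySQL
+    #       size: "{{ database_size }}"
+    #       instance_type: "{{ instance_type }}"
+    #       iops: 1000   # forces io1; omitting iops gives magnetic storage
+    #
+    # Hence the aws CLI call below, which can request gp2 directly.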
+
+    - name: Create RDS instance using SSD (gp2) storage
+      command: "aws rds create-db-instance
+        --db-instance-identifier {{ database_name }}
+        --storage-type gp2
+        --allocated-storage {{ database_size }}
+        --db-instance-class {{ instance_type }}
+        --engine {{ database_engine | default('MySQL') }}
+        --engine-version {{ database_engine_version }}
+        --master-username {{ database_connection.login_user }}
+        --master-user-password {{ database_connection.login_password }}
+        --vpc-security-group-ids {{ vpc_security_groups }}
+        --db-subnet-group-name {{ subnet_group }}
+        --preferred-maintenance-window {{ maintenance_window }}
+        --db-parameter-group-name {{ parameter_group }}
+        --backup-retention-period {{ backup_retention }}
+        --preferred-backup-window {{ backup_window }}
+        --{{ '' if multi_zone == 'yes' else 'no-' }}multi-az
+        {{ '--enable-performance-insights' if performance_insights is defined and performance_insights == 'yes' else '' }}
+        --tags '{{ tags }}'
+        "
+      register: result
+      failed_when: >
+        result.rc != 0 and ('DBInstanceAlreadyExists' not in result.stderr)
+      changed_when: "result.rc == 0"
+
+    - name: Wait for RDS to be available
+      rds:
+        command: facts
+        region: "{{ aws_region }}"
+        instance_name: "{{ database_name }}"
+      register: result
+      until: result.instance.status == "available"
+      retries: 20
+      delay: 60
+
+    - name: Add role to DB cluster
+      command: "aws rds add-role-to-db-cluster
+        --db-cluster-identifier {{ cluster_name }}
+        --role-arn {{ cluster_role_arn }}
+        "
+      register: add_role_result
+      failed_when: >
+        add_role_result.rc != 0 and ('DBClusterRoleAlreadyExists' not in add_role_result.stderr)
+      changed_when: "add_role_result.rc == 0"
+      when: cluster_name is defined and cluster_role_arn is defined
+
+- include: create_db_and_users.yml
+  when: database_connection.login_host is defined
diff --git a/playbooks/create_role.yml b/playbooks/create_role.yml
index 966743e628f..3589ac6477f 100644
--- a/playbooks/create_role.yml
+++ b/playbooks/create_role.yml
@@ -1,7 +1,7 @@
 ---
 # Creates a new ansible role
 # Usage:
-# ansible-playbook -c local --limit "localhost," ./create_role.yml -i "localhost," -e role_name=my_awesome_role
+# ansible-playbook -c local --limit "localhost," ./create_role.yml -i "localhost," -e my_role_name=my_awesome_role
 #
 - hosts: localhost
   gather_facts: False
diff --git a/playbooks/create_user.yml b/playbooks/create_user.yml
new file mode 100644
index 00000000000..64269ccf33f
--- /dev/null
+++ b/playbooks/create_user.yml
@@ -0,0 +1,26 @@
+# Creates a single user on a server
+- name: Create a single user
+  hosts: all
+  become: True
+  gather_facts: True
+  pre_tasks:
+    - fail: msg="You must pass a user into this play"
+      when: user is not defined
+    - name: give access with no sudo
+      set_fact:
+        CUSTOM_USER_INFO:
+          - name: "{{ user }}"
+            github: true
+            state: "{{ state | default('present') }}"
+      when: give_sudo is not defined
+    - name: give access with sudo
+      set_fact:
+        CUSTOM_USER_INFO:
+          - name: "{{ user }}"
+            type: admin
+            github: true
+            state: "{{ state | default('present') }}"
+      when: give_sudo is defined
+  roles:
+    - role: user
+      user_info: "{{ CUSTOM_USER_INFO }}"
diff --git a/playbooks/credentials.yml b/playbooks/credentials.yml
new file mode 100644
index 00000000000..625d6ea8341
--- /dev/null
+++ b/playbooks/credentials.yml
@@ -0,0 +1,26 @@
+- name: Deploy edX Credentials Service
+  hosts: all
+  become: True
+  gather_facts: True
+  vars:
+    ENABLE_DATADOG: False
+    ENABLE_NEWRELIC: False
+    CLUSTER_NAME: 'credentials'
+  roles:
+    - role: aws
+      when: COMMON_ENABLE_AWS_ROLE
+ - role: nginx + nginx_default_sites: + - credentials + - credentials + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'credentials' + when: CREDENTIALS_HERMES_ENABLED diff --git a/playbooks/demo.yml b/playbooks/demo.yml new file mode 100644 index 00000000000..403155806e9 --- /dev/null +++ b/playbooks/demo.yml @@ -0,0 +1,15 @@ +- name: Deploy demo course + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - demo + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/deploy_nginx_all_roles.yml b/playbooks/deploy_nginx_all_roles.yml new file mode 100644 index 00000000000..f1a0dfc3f8b --- /dev/null +++ b/playbooks/deploy_nginx_all_roles.yml @@ -0,0 +1,18 @@ +- name: Configure instance(s) + hosts: all + become: True + gather_facts: False + vars_files: + - roles/edxapp/defaults/main.yml + - roles/xqueue/defaults/main.yml + roles: + - common + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - cms + - lms + - xqueue + nginx_default_sites: + - lms diff --git a/playbooks/deploy_nginx_for_dbt_docs.yml b/playbooks/deploy_nginx_for_dbt_docs.yml new file mode 100644 index 00000000000..cefc7258a43 --- /dev/null +++ b/playbooks/deploy_nginx_for_dbt_docs.yml @@ -0,0 +1,9 @@ +- name: Deploy dbt-docs server + hosts: all + become: True + gather_facts: True + roles: + - aws + - nginx + - aws_cloudwatch_agent + - dbt_docs_nginx \ No newline at end of file diff --git a/playbooks/designer.yml b/playbooks/designer.yml new file mode 100644 index 00000000000..ada06ab1eb5 --- /dev/null +++ b/playbooks/designer.yml @@ -0,0 +1,21 @@ +- name: Deploy edX designer + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'designer' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - designer + - designer + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'designer' + when: DESIGNER_HERMES_ENABLED diff --git a/playbooks/devstack_ami.yml b/playbooks/devstack_ami.yml new file mode 100644 index 00000000000..8f8631f8a67 --- /dev/null +++ b/playbooks/devstack_ami.yml @@ -0,0 +1,11 @@ +- name: Build cloud devstack AMI + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: aws_devstack diff --git a/playbooks/discovery.yml b/playbooks/discovery.yml new file mode 100644 index 00000000000..eb0717c0823 --- /dev/null +++ b/playbooks/discovery.yml @@ -0,0 +1,27 @@ +- name: Deploy edX Course Discovery Service + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'discovery' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - discovery + - discovery + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: 
COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'discovery' + when: DISCOVERY_HERMES_ENABLED + diff --git a/playbooks/ec2.ini b/playbooks/ec2.ini index 9d2ad81f994..f677fd26bfd 100644 --- a/playbooks/ec2.ini +++ b/playbooks/ec2.ini @@ -11,7 +11,7 @@ # AWS regions to make calls to. Set this to 'all' to make request to all regions # in AWS and merge the results together. Alternatively, set this to a comma # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' -regions = all +regions = us-east-1,eu-west-1 regions_exclude = us-gov-west-1 # When generating inventory, Ansible needs to know how to address a server. diff --git a/playbooks/ec2.py b/playbooks/ec2.py index 9f71a2f50ec..01fb64afbf3 100755 --- a/playbooks/ec2.py +++ b/playbooks/ec2.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -''' +""" EC2 external inventory script ================================= @@ -87,7 +87,7 @@ Security groups are comma-separated in 'ec2_security_group_ids' and 'ec2_security_group_names'. -''' +""" # (c) 2012, Peter Sankauskas # @@ -117,7 +117,9 @@ from boto import ec2 from boto import rds from boto import route53 -import ConfigParser +import six.moves.configparser +import traceback +import six try: import json @@ -125,13 +127,16 @@ import simplejson as json -class Ec2Inventory(object): +class Ec2Inventory: + def _empty_inventory(self): + return {"_meta": {"hostvars": {}}} + def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones - self.inventory = {} + self.inventory = self._empty_inventory() # Index of hostname (address) to instance ID self.index = {} @@ -145,7 +150,6 @@ def __init__(self): self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() - # Data to print if self.args.host: data_to_print = self.get_host_info() @@ -156,15 +160,19 @@ def __init__(self): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) - - print data_to_print + print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) + if self.args.tags_only: + to_check = self.cache_path_tags + else: + to_check = self.cache_path_cache + + if os.path.isfile(to_check): + mod_time = os.path.getmtime(to_check) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): @@ -176,7 +184,7 @@ def is_cache_valid(self): def read_settings(self): ''' Reads the settings from the ec2.ini file ''' - config = ConfigParser.SafeConfigParser() + config = six.moves.configparser.SafeConfigParser() config.read(self.args.inifile) # is eucalyptus? 
@@ -213,27 +221,45 @@ def read_settings(self):
             config.get('ec2', 'route53_excluded_zones', '').split(','))
 
         # Cache related
-        cache_path = config.get('ec2', 'cache_path')
-        self.cache_path_cache = cache_path + "/ansible-ec2.cache"
-        self.cache_path_index = cache_path + "/ansible-ec2.index"
-        self.cache_max_age = config.getint('ec2', 'cache_max_age')
-
+        if 'EC2_CACHE_PATH' in os.environ:
+            cache_path = os.environ['EC2_CACHE_PATH']
+        elif self.args.cache_path:
+            cache_path = self.args.cache_path
+        else:
+            cache_path = config.get('ec2', 'cache_path')
+        if not os.path.exists(cache_path):
+            os.makedirs(cache_path)
+
+        if 'AWS_PROFILE' in os.environ:
+            aws_profile = "{}-".format(os.environ.get('AWS_PROFILE'))
+        else:
+            aws_profile = ""
+        self.cache_path_cache = cache_path + f"/{aws_profile}ansible-ec2.cache"
+        self.cache_path_tags = cache_path + f"/{aws_profile}ansible-ec2.tags.cache"
+        self.cache_path_index = cache_path + f"/{aws_profile}ansible-ec2.index"
+        self.cache_max_age = config.getint('ec2', 'cache_max_age')
 
     def parse_cli_args(self):
         ''' Command line argument processing '''
 
         parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
+        parser.add_argument('--tags-only', action='store_true', default=False,
+                            help='only return tags (default: False)')
         parser.add_argument('--list', action='store_true', default=True,
                             help='List instances (default: True)')
         parser.add_argument('--host', action='store',
                             help='Get all the variables about a specific instance')
-        parser.add_argument('--refresh-cache', action='store_true', default=False,
+        parser.add_argument('--refresh-cache', action='store_true', default=True,
                             help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
         default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
 
         parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
+        parser.add_argument(
+            '--cache-path',
+            help='Override the cache path set in ini file',
+            required=False)
 
         self.args = parser.parse_args()
 
@@ -247,9 +273,12 @@ def do_api_calls_update_cache(self):
             self.get_instances_by_region(region)
             self.get_rds_instances_by_region(region)
 
-        self.write_to_cache(self.inventory, self.cache_path_cache)
-        self.write_to_cache(self.index, self.cache_path_index)
+        if self.args.tags_only:
+            self.write_to_cache(self.inventory, self.cache_path_tags)
+        else:
+            self.write_to_cache(self.inventory, self.cache_path_cache)
+            self.write_to_cache(self.index, self.cache_path_index)
 
     def get_instances_by_region(self, region):
         ''' Makes an AWS EC2 API call to the list of instances in a particular
@@ -266,21 +295,21 @@ def get_instances_by_region(self, region):
             if conn is None:
                 print("region name: %s likely not supported, or AWS is down.  connection to region failed."
% region) sys.exit(1) - + reservations = conn.get_all_instances() for reservation in reservations: - instances = sorted(reservation.instances) + instances = sorted(reservation.instances, key=lambda x: x.id) for instance in instances: self.add_instance(instance, region) - + except boto.exception.BotoServerError as e: if not self.eucalyptus: - print "Looks like AWS is down again:" - print e + print("Looks like AWS is down again:") + print(e) sys.exit(1) def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular + ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: @@ -290,8 +319,8 @@ def get_rds_instances_by_region(self, region): for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: - print "Looks like AWS RDS is down: " - print e + print("Looks like AWS RDS is down: ") + print(e) sys.exit(1) def get_instance(self, region, instance_id): @@ -349,19 +378,19 @@ def add_instance(self, instance, region): # Inventory: Group by key pair if instance.key_name: self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest) - + # Inventory: Group by security group try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, dest) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' + print('Package boto seems a bit older.') + print('Please upgrade boto >= 2.3.0.') sys.exit(1) # Inventory: Group by tag keys - for k, v in instance.tags.iteritems(): + for k, v in instance.tags.items(): key = self.to_safe("tag_" + k + "=" + v) self.push(self.inventory, key, dest) self.keep_first(self.inventory, 'first_in_' + key, dest) @@ -403,18 +432,18 @@ def add_rds_instance(self, instance, region): # Inventory: Group by availability zone self.push(self.inventory, instance.availability_zone, dest) - + # Inventory: Group by instance type self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest) - + # Inventory: Group by security group try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, dest) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
+ print('Package boto seems a bit older.') + print('Please upgrade boto >= 2.3.0.') sys.exit(1) # Inventory: Group by engine @@ -493,18 +522,17 @@ def get_host_info(self): for key in vars(instance): value = getattr(instance, key) key = self.to_safe('ec2_' + key) - # Handle complex types - if type(value) in [int, bool]: + if isinstance(value, (int, bool)): instance_vars[key] = value - elif type(value) in [str, unicode]: + elif isinstance(value, str): instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2_tags': - for k, v in value.iteritems(): + for k, v in value.items(): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': @@ -541,8 +569,10 @@ def keep_first(self, my_dict, key, element): def get_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' - - cache = open(self.cache_path_cache, 'r') + if self.args.tags_only: + cache = open(self.cache_path_tags) + else: + cache = open(self.cache_path_cache) json_inventory = cache.read() return json_inventory @@ -550,13 +580,15 @@ def get_inventory_from_cache(self): def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' - cache = open(self.cache_path_index, 'r') + cache = open(self.cache_path_index) json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' + ''' + Writes data in JSON format to a file + ''' json_data = self.json_format_dict(data, True) cache = open(filename, 'w') @@ -568,13 +600,14 @@ def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - return re.sub("[^A-Za-z0-9\-]", "_", word) + return re.sub(r"[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' - + if self.args.tags_only: + data = [key for key in data.keys() if 'tag_' in key] if pretty: return json.dumps(data, sort_keys=True, indent=2) else: @@ -582,5 +615,11 @@ def json_format_dict(self, data, pretty=False): # Run the script -Ec2Inventory() - +RETRIES = 3 + +for _ in range(RETRIES): + try: + Ec2Inventory() + break + except Exception: + traceback.print_exc() diff --git a/playbooks/ecommerce.yml b/playbooks/ecommerce.yml new file mode 100644 index 00000000000..9f36e4163c3 --- /dev/null +++ b/playbooks/ecommerce.yml @@ -0,0 +1,26 @@ +- name: Deploy edX Ecommerce + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'ecommerce' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - ecommerce + - ecommerce + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'ecommerce' + when: ECOMMERCE_HERMES_ENABLED diff --git a/playbooks/ecomworker.yml b/playbooks/ecomworker.yml new file mode 100644 index 00000000000..f7c288770bb --- /dev/null +++ b/playbooks/ecomworker.yml @@ -0,0 +1,23 @@ +- name: Deploy edX Ecommerce Worker + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + roles: + - role: aws + 
when: COMMON_ENABLE_AWS_ROLE + - ecomworker + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'ecomworker' + when: ECOMMERCE_WORKER_HERMES_ENABLED + diff --git a/playbooks/edx-auth-proxy.yml b/playbooks/edx-auth-proxy.yml new file mode 100644 index 00000000000..392ab3f7fa8 --- /dev/null +++ b/playbooks/edx-auth-proxy.yml @@ -0,0 +1,17 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - role: python + tags: + - install + - install:system-requirements + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + roles: + - oauth2_proxy diff --git a/playbooks/edx-east b/playbooks/edx-east new file mode 120000 index 00000000000..945c9b46d68 --- /dev/null +++ b/playbooks/edx-east @@ -0,0 +1 @@ +. \ No newline at end of file diff --git a/playbooks/edx-east/README.md b/playbooks/edx-east/README.md deleted file mode 100644 index d87caeed65f..00000000000 --- a/playbooks/edx-east/README.md +++ /dev/null @@ -1,6 +0,0 @@ -This directory contains playbooks used by edx-east -for provisioning - -``` -ansible-playbook -c ssh -vvv --user=ubuntu -i ./ec2.py -e 'secure_dir=path/to/configuration-secure/ansible' -``` diff --git a/playbooks/edx-east/ansible.cfg b/playbooks/edx-east/ansible.cfg deleted file mode 100644 index 3259d75fe9c..00000000000 --- a/playbooks/edx-east/ansible.cfg +++ /dev/null @@ -1,9 +0,0 @@ -# config file for ansible -- http://ansible.github.com -# nearly all parameters can be overridden in ansible-playbook or with command line flags -# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first - -[defaults] - -jinja2_extensions=jinja2.ext.do -host_key_checking=False -roles_path=../../../ansible-roles diff --git a/playbooks/edx-east/aws.yml b/playbooks/edx-east/aws.yml deleted file mode 100644 index 48bd8f833b1..00000000000 --- a/playbooks/edx-east/aws.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Deploy aws - hosts: all - sudo: True - gather_facts: True - roles: - - aws diff --git a/playbooks/edx-east/callback_plugins b/playbooks/edx-east/callback_plugins deleted file mode 120000 index a7b55d2519d..00000000000 --- a/playbooks/edx-east/callback_plugins +++ /dev/null @@ -1 +0,0 @@ -../callback_plugins \ No newline at end of file diff --git a/playbooks/edx-east/certs.yml b/playbooks/edx-east/certs.yml deleted file mode 100644 index 9ed2c0a268f..00000000000 --- a/playbooks/edx-east/certs.yml +++ /dev/null @@ -1,14 +0,0 @@ -- name: Deploy certs - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - aws - - certs - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/common.yml b/playbooks/edx-east/common.yml deleted file mode 100644 index 2d46056b20c..00000000000 --- a/playbooks/edx-east/common.yml +++ /dev/null @@ -1,13 +0,0 @@ -- name: Deploy common - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - common - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/create_dbs.yml b/playbooks/edx-east/create_dbs.yml deleted file mode 100644 index ea83b5c1fea..00000000000 --- 
a/playbooks/edx-east/create_dbs.yml +++ /dev/null @@ -1,156 +0,0 @@ -# This is a utility play to initialize the mysql dbs for the following -# roles: -# - edxapp -# - xqueue -# - ora -# - discern -# -# The mysql root user MUST be passed in as extra vars for -# at least one of the databases. -# -# the environment and deployment must be passed in as COMMON_ENVIRONMENT -# and COMMON_DEPLOYMENT. These two vars should be set in the secret -# var file for the corresponding vpc stack -# -# Example invocation: -# -# Create the databases for edxapp and xqueue: -# -# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root xqueue_db_root_user=root" -# -# -- name: Create all databases on the edX stack - hosts: all - gather_facts: False - vars: - # These should be set to the root user for the - # db, if left 'None' the databse will be skipped - edxapp_db_root_user: 'None' - xqueue_db_root_user: 'None' - ora_db_root_user: 'None' - discern_db_root_user: 'None' - - vars_prompt: - # passwords use vars_prompt so they aren't in the - # bash history - - name: "edxapp_db_root_pass" - prompt: "Password for edxapp root mysql user (enter to skip)" - default: "None" - private: True - - name: "xqueue_db_root_pass" - prompt: "Password for xqueue root mysql user (enter to skip)" - default: "None" - private: True - - name: "ora_db_root_pass" - prompt: "Password for ora root mysql user (enter to skip)" - default: "None" - private: True - - name: "discern_db_root_pass" - prompt: "Password for discern root mysql user (enter to skip)" - default: "None" - private: True - - - tasks: - - fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play" - when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined - - name: create mysql databases for the edX stack - mysql_db: > - db={{ item[0] }}{{ item[1].db_name }} - state=present - login_host={{ item[1].db_host }} - login_user={{ item[1].db_user }} - login_password={{ item[1].db_pass }} - encoding=utf8 - when: item[1].db_user != 'None' - with_nested: - - ['{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_', ''] - - - # These defaults are needed, otherwise ansible will throw - # variable undefined errors for when they are not defined - # in secret vars - - db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}" - db_user: "{{ edxapp_db_root_user }}" - db_pass: "{{ edxapp_db_root_pass }}" - - db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}" - db_user: "{{ xqueue_db_root_user }}" - db_pass: "{{ xqueue_db_root_pass }}" - - db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ ORA_MYSQL_HOST|default('None') }}" - db_user: "{{ ora_db_root_user }}" - db_pass: "{{ ora_db_root_pass }}" - - - name: assign mysql user permissions for db user - mysql_user: - name: "{{ item.db_user_to_modify }}" - priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE" - password: "{{ item.db_user_to_modify_pass }}" - login_host: "{{ item.db_host }}" - login_user: "{{ item.db_user }}" - login_password: "{{ item.db_pass }}" - host: '%' - when: item.db_user != 'None' - with_items: - # These defaults are needed, otherwise ansible will throw - # variable undefined errors for when they are not defined - # in secret vars - - db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}" - db_user: "{{ edxapp_db_root_user|default('None') }}" - db_pass: "{{ 
edxapp_db_root_pass|default('None') }}" - db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}" - db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}" - - db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}" - db_user: "{{ xqueue_db_root_user|default('None') }}" - db_pass: "{{ xqueue_db_root_pass|default('None') }}" - db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}" - db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}" - - db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ ORA_MYSQL_HOST|default('None') }}" - db_user: "{{ ora_db_root_user|default('None') }}" - db_pass: "{{ ora_db_root_pass|default('None') }}" - db_user_to_modify: "{{ ORA_MYSQL_USER }}" - db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}" - - - # The second call to mysql_user needs to have append_privs set to - # yes otherwise it will overwrite the previous run. - # This means that both tasks will report changed on every ansible - # run - - - name: assign mysql user permissions for db test user - mysql_user: - append_privs: yes - name: "{{ item.db_user_to_modify }}" - priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL" - password: "{{ item.db_user_to_modify_pass }}" - login_host: "{{ item.db_host }}" - login_user: "{{ item.db_user }}" - login_password: "{{ item.db_pass }}" - host: '%' - when: item.db_user != 'None' - with_items: - # These defaults are needed, otherwise ansible will throw - # variable undefined errors for when they are not defined - # in secret vars - - db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}" - db_user: "{{ edxapp_db_root_user|default('None') }}" - db_pass: "{{ edxapp_db_root_pass|default('None') }}" - db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}" - db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}" - - db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}" - db_user: "{{ xqueue_db_root_user|default('None') }}" - db_pass: "{{ xqueue_db_root_pass|default('None') }}" - db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}" - db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}" - - db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}" - db_host: "{{ ORA_MYSQL_HOST|default('None') }}" - db_user: "{{ ora_db_root_user|default('None') }}" - db_pass: "{{ ora_db_root_pass|default('None') }}" - db_user_to_modify: "{{ ORA_MYSQL_USER }}" - db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}" diff --git a/playbooks/edx-east/create_user.yml b/playbooks/edx-east/create_user.yml deleted file mode 100644 index 5a60b23ac26..00000000000 --- a/playbooks/edx-east/create_user.yml +++ /dev/null @@ -1,26 +0,0 @@ -# Creates a single user on a server -# By default no super-user privileges -# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv" -# Create a user with sudo privileges -# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv" -e "give_sudo=true" -- name: Create a single user - hosts: all - sudo: True - gather_facts: False - vars: - give_sudo: False - pre_tasks: - - fail: msg="You must pass a user into this play" - when: not user - - name: give access with no sudo - set_fact: - gh_users_no_sudo: - - "{{ user }}" - when: not give_sudo or give_sudo == "false" - - name: give access with sudo - set_fact: - gh_users: - - "{{ user }}" - when: give_sudo - roles: - - gh_users diff --git a/playbooks/edx-east/demo.yml b/playbooks/edx-east/demo.yml deleted file mode 
100644 index 61ac8cb78f6..00000000000 --- a/playbooks/edx-east/demo.yml +++ /dev/null @@ -1,13 +0,0 @@ -- name: Deploy demo course - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - demo - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/deploy_nginx_all_roles.yml b/playbooks/edx-east/deploy_nginx_all_roles.yml deleted file mode 100644 index af36bd626be..00000000000 --- a/playbooks/edx-east/deploy_nginx_all_roles.yml +++ /dev/null @@ -1,20 +0,0 @@ -- name: Configure instance(s) - hosts: all - sudo: True - gather_facts: False - vars_files: - - roles/edxapp/defaults/main.yml - - roles/ora/defaults/main.yml - - roles/xqueue/defaults/main.yml - - roles/xserver/defaults/main.yml - roles: - - common - - role: nginx - nginx_sites: - - cms - - lms - - ora - - xqueue - - xserver - nginx_default_sites: - - lms diff --git a/playbooks/edx-east/deployer.yml b/playbooks/edx-east/deployer.yml deleted file mode 100644 index 89dcaa2dc45..00000000000 --- a/playbooks/edx-east/deployer.yml +++ /dev/null @@ -1,19 +0,0 @@ -# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@gh_users.yml" -e "@/path/to/secure/ansible/vars/hotg.yml" -e "@/path/to/configuration-secure/ansible/vars/common/common.yml" --limit="tag_aws_cloudformation_stack-name_" -# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines. -# Setup user login on the bastion -- name: Configure Bastion - hosts: tag_role_bastion - sudo: True - gather_facts: False - roles: - - gh_users -# Configure an admin instance with jenkins and asgard. -- name: Configure instance(s) - hosts: tag_role_admin - sudo: True - gather_facts: True - roles: - - common - - gh_users - - jenkins_master - - hotg diff --git a/playbooks/edx-east/devpi.yml b/playbooks/edx-east/devpi.yml deleted file mode 100644 index 205c65d404c..00000000000 --- a/playbooks/edx-east/devpi.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Deploy devpi - hosts: all - sudo: True - gather_facts: True - roles: - - devpi diff --git a/playbooks/edx-east/discern.yml b/playbooks/edx-east/discern.yml deleted file mode 100644 index 746afa71d60..00000000000 --- a/playbooks/edx-east/discern.yml +++ /dev/null @@ -1,17 +0,0 @@ -- name: Deploy discern - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - aws - - role: nginx - nginx_sites: - - discern - - discern - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/ec2.py b/playbooks/edx-east/ec2.py deleted file mode 120000 index 5c6f177c184..00000000000 --- a/playbooks/edx-east/ec2.py +++ /dev/null @@ -1 +0,0 @@ -../ec2.py \ No newline at end of file diff --git a/playbooks/edx-east/edx_ansible.yml b/playbooks/edx-east/edx_ansible.yml deleted file mode 100644 index 7ae30643abc..00000000000 --- a/playbooks/edx-east/edx_ansible.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Deploy the edx_ansible role - hosts: all - sudo: True - gather_facts: False - roles: - - edx_ansible diff --git a/playbooks/edx-east/edx_continuous_integration.yml b/playbooks/edx-east/edx_continuous_integration.yml deleted file mode 100644 index 9d306d2ccbe..00000000000 --- a/playbooks/edx-east/edx_continuous_integration.yml +++ /dev/null @@ -1,35 +0,0 @@ -- name: Configure instance(s) - hosts: all - sudo: True - gather_facts: 
True - serial: 10 - vars: - migrate_db: "yes" - openid_workaround: True - roles: - - aws - - role: nginx - nginx_sites: - - cms - - lms - - ora - - xqueue - - xserver - nginx_default_sites: - - lms - - edxlocal - - mongo - - { role: 'edxapp', celery_worker: True } - - edxapp - - role: demo - tags: ['demo'] - - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - - oraclejdk - - elasticsearch - - forum - - { role: "xqueue", update_users: True } - - xserver - - ora - - discern - - certs - - edx_ansible diff --git a/playbooks/edx-east/edx_dev2.yml b/playbooks/edx-east/edx_dev2.yml deleted file mode 100644 index aa89b9636e0..00000000000 --- a/playbooks/edx-east/edx_dev2.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_bastion - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/dev2.yml" - - "{{ secure_dir }}/vars/common/common.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - common -- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_edxapp - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/dev2.yml" - - "{{ secure_dir }}/vars/common/common.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - datadog - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - role: 'edxapp' - EDXAPP_LMS_NGINX_PORT: 80 - EDXAPP_CMS_NGINX_PORT: 80 - edxapp_lms_env: 'lms.envs.load_test' - edx_platform_version: 'sarina/install-datadog' -- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/dev2.yml" - - "{{ secure_dir }}/vars/common/common.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - datadog - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - role: 'edxapp' - edxapp_lms_env: 'lms.envs.load_test' - celery_worker: True - edx_platform_version: 'sarina/install-datadog' -#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xserver -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/dev/dev2.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - nginx -# - xserver -#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_rabbitmq -# serial: 1 -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/dev/dev2.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - rabbitmq -#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xqueue -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/dev/dev2.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - nginx -# - xqueue diff --git a/playbooks/edx-east/edx_feanilsandbox.yml b/playbooks/edx-east/edx_feanilsandbox.yml deleted file mode 100644 index c039eee9a0b..00000000000 --- a/playbooks/edx-east/edx_feanilsandbox.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_edxapp - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" - - "{{ secure_dir }}/vars/common/common.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - datadog - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - role: 'edxapp' - edxapp_lms_env: 'lms.envs.load_test' - edx_platform_version: 'release' - - splunkforwarder -- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_worker - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" - - "{{ secure_dir }}/vars/common/common.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - datadog - - role: nginx - nginx_sites: - - lms - - cms 
- - lms-preview - nginx_default_sites: - - lms - - role: 'edxapp' - edxapp_lms_env: 'lms.envs.load_test' - celery_worker: True - edx_platform_version: 'release' - - splunkforwarder -- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xserver - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - role: nginx - nginx_sites: - - xserver - - xserver - - splunkforwarder -- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_rabbitmq - serial: 1 - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - rabbitmq - - splunkforwarder -- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xqueue - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - role: nginx - nginx_sites: - - xqueue - - xqueue - - splunkforwarder -- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_mongo - sudo: True - vars_files: - - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" - - "{{ secure_dir }}/vars/users.yml" - roles: - - role: 'mongo' - mongo_clustered: true diff --git a/playbooks/edx-east/edx_jenkins_tests.yml b/playbooks/edx-east/edx_jenkins_tests.yml deleted file mode 100644 index 7ac36bce620..00000000000 --- a/playbooks/edx-east/edx_jenkins_tests.yml +++ /dev/null @@ -1,33 +0,0 @@ -- name: Configure instance(s) - hosts: jenkins_test - sudo: True - gather_facts: True - vars: - migrate_db: "yes" - openid_workaround: True - ansible_ssh_private_key_file: /var/lib/jenkins/continuous-integration.pem - mysql5_workaround: True - vars_files: - - "{{ secure_dir }}/vars/edxapp_ref_users.yml" - - "{{ secure_dir }}/vars/edxapp_sandbox.yml" - # overrides specific to the jenkins test playbook - - "{{ secure_dir }}/vars/edx_jenkins_tests.yml" - roles: - - common - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - - xqueue - - xserver - - ora - nginx_default_sites: - - lms - - edxlocal - - mongo - - edxapp - - xqueue - - xserver - - ora - - rabbitmq diff --git a/playbooks/edx-east/edx_mirror.yml b/playbooks/edx-east/edx_mirror.yml deleted file mode 100644 index 3a1d2731d94..00000000000 --- a/playbooks/edx-east/edx_mirror.yml +++ /dev/null @@ -1,19 +0,0 @@ -# ansible-playbook --limit tag_Name_mirror edx_mirror.yml --user ubuntu -i ec2.py -- name: Configure instance(s) - hosts: all - sudo: True - gather_facts: False - roles: - - role: nginx - nginx_sites: - - devpi - - gh_mirror - tags: ['r_nginx'] - - role: supervisor - supervisor_servers: - - devpi - - role: devpi - tags: ['r_devpi'] - - role: gh_mirror - tags: ['r_gh_mirror'] - diff --git a/playbooks/edx-east/edx_notifier.yml b/playbooks/edx-east/edx_notifier.yml deleted file mode 100644 index 3d7ed19c527..00000000000 --- a/playbooks/edx-east/edx_notifier.yml +++ /dev/null @@ -1,70 +0,0 @@ -- name: Configure stage instance(s) - hosts: notifier_stage - sudo: True - vars_files: - - "{{ secure_dir }}/vars/stage/notifier.yml" - - "{{ secure_dir }}/vars/users.yml" - gather_facts: True - roles: - - role: virtualenv - virtualenv_user: "notifier" - virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier - -- name: Configure loadtest instance(s) - hosts: notifier_loadtest - sudo: True - vars_files: - - "{{ secure_dir }}/vars/loadtest/notifier.yml" - - "{{ secure_dir }}/vars/users.yml" - gather_facts: True - roles: - - role: virtualenv - virtualenv_user: "notifier" - 
virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier - -- name: Configure stage edge instance(s) - hosts: notifier_edge_stage - sudo: True - vars_files: - - "{{ secure_dir }}/vars/edge_stage/notifier.yml" - - "{{ secure_dir }}/vars/users.yml" - gather_facts: True - roles: - - role: virtualenv - virtualenv_user: "notifier" - virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier - -- name: Configure prod instance(s) - hosts: notifier_prod - sudo: True - vars_files: - - "{{ secure_dir }}/vars/prod/notifier.yml" - - "{{ secure_dir }}/vars/users.yml" - gather_facts: True - roles: - - role: virtualenv - virtualenv_user: "notifier" - virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier - -- name: Configure edge prod instance(s) - hosts: notifier_edge_prod - sudo: True - vars_files: - - "{{ secure_dir }}/vars/edge_prod/notifier.yml" - - "{{ secure_dir }}/vars/users.yml" - gather_facts: True - vars: - roles: - - role: virtualenv - virtualenv_user: "notifier" - virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier diff --git a/playbooks/edx-east/edx_provision.yml b/playbooks/edx-east/edx_provision.yml deleted file mode 100644 index fd326e1af11..00000000000 --- a/playbooks/edx-east/edx_provision.yml +++ /dev/null @@ -1,92 +0,0 @@ -- name: Create ec2 instance - hosts: localhost - connection: local - gather_facts: False - vars: - keypair: continuous-integration - instance_type: m1.medium - security_group: sandbox - # ubuntu 12.04 - ami: ami-d0f89fb9 - region: us-east-1 - zone: us-east-1b - instance_tags: - environment: sandbox - github_username: temp - Name: sandbox-temp - source: provisioning-script - owner: temp - root_ebs_size: 50 - dns_name: temp - dns_zone: m.sandbox.edx.org - name_tag: sandbox-temp - elb: false - roles: - - role: launch_ec2 - keypair: "{{ keypair }}" - instance_type: "{{ instance_type }}" - security_group: "{{ security_group }}" - ami: "{{ ami }}" - region: "{{ region }}" - instance_tags: "{{ instance_tags }}" - root_ebs_size: "{{ root_ebs_size }}" - dns_name: "{{ dns_name }}" - dns_zone: "{{ dns_zone }}" - zone: "{{ zone }}" - terminate_instance: true - instance_profile_name: sandbox - -- name: Configure instance(s) - hosts: launched - sudo: True - gather_facts: False - vars: - elb: false - pre_tasks: - - name: Wait for cloud-init to finish - wait_for: > - path=/var/log/cloud-init.log - timeout=15 - search_regex="final-message" - vars_files: - - roles/edxapp/defaults/main.yml - - roles/ora/defaults/main.yml - - roles/xqueue/defaults/main.yml - - roles/xserver/defaults/main.yml - - roles/forum/defaults/main.yml - roles: - # rerun common to set the hostname, nginx to set basic auth - - common - - role: nginx - nginx_sites: - - cms - - lms - - ora - - xqueue - - xserver - - forum - nginx_default_sites: - - lms - # gh_users hash must be passed - # in as a -e variable - - gh_users - post_tasks: - - name: get instance id for elb registration - local_action: - module: ec2_lookup - region: us-east-1 - tags: - Name: "{{ name_tag }}" - register: ec2_info - when: elb - sudo: False - - name: register instance into an elb if one was provided - local_action: - module: ec2_elb_local_1.3 - region: "{{ region }}" - instance_id: "{{ ec2_info.instance_ids[0] }}" - state: present - ec2_elbs: - - "{{ elb }}" - when: elb - sudo: False diff --git a/playbooks/edx-east/edx_vpc.yml b/playbooks/edx-east/edx_vpc.yml deleted file mode 100644 index 8e1928987f1..00000000000 --- 
a/playbooks/edx-east/edx_vpc.yml +++ /dev/null @@ -1,134 +0,0 @@ ---- -- hosts: first_in_tag_role_mongo - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - role: 'mongo' - mongo_create_users: yes -#- hosts: tag_role_mongo:!first_in_tag_role_mongo -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" -# - "{{ secure_dir }}/vars/common/common.yml" -# roles: -# - gh_users -# - mongo -- hosts: first_in_tag_role_edxapp - sudo: True - serial: 1 - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - datadog - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - role: 'edxapp' - edxapp_lms_env: 'lms.envs.load_test' - migrate_db: '{{ RUN_EDXAPP_MIGRATION }}' - openid_workaround: 'yes' - - splunkforwarder -- hosts: tag_role_edxapp:!first_in_tag_role_edxapp - sudo: True - serial: 1 - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - datadog - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_site: - - lms - - role: 'edxapp' - edxapp_lms_env: 'lms.envs.load_test' - - splunkforwarder -- hosts: tag_role_worker - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - datadog - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_site: - - lms - - role: 'edxapp' - edxapp_lms_env: 'lms.envs.load_test' - celery_worker: True - - splunkforwarder -- hosts: tag_role_xserver - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - role: nginx - nginx_sites: - - xserver - - xserver - - splunkforwarder -- hosts: tag_role_rabbitmq - serial: 1 - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - rabbitmq - - splunkforwarder -- hosts: first_in_tag_role_xqueue - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - role: nginx - nginx_sites: - - xqueue - - role: xqueue - migrate_db: '{{ RUN_XQUEUE_MIGRATION }}' - - splunkforwarder -- hosts: tag_role_xqueue:!first_in_tag_role_xqueue - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - role: nginx - nginx_sites: - - xqueue - - xqueue - - splunkforwarder -- hosts: tag_role_forum - sudo: True - vars_files: - - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml" - - "{{ secure_dir }}/vars/common/common.yml" - roles: - - gh_users - - oraclejdk - - elasticsearch - - forum diff --git a/playbooks/edx-east/edxapp.yml b/playbooks/edx-east/edxapp.yml deleted file mode 100644 index 0f844194b96..00000000000 --- a/playbooks/edx-east/edxapp.yml +++ /dev/null @@ -1,20 +0,0 @@ -- name: Deploy edxapp - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - 
enable_splunkforwarder: True - roles: - - aws - - role: nginx - nginx_sites: - - lms - - cms - nginx_default_sites: - - lms - - edxapp - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/elasticsearch.yml b/playbooks/edx-east/elasticsearch.yml deleted file mode 100644 index 8c40c8a6590..00000000000 --- a/playbooks/edx-east/elasticsearch.yml +++ /dev/null @@ -1,10 +0,0 @@ -- hosts: tag_play_commoncluster:&tag_environment_stage:&tag_deployment_edx - sudo: True - vars_files: - - "{{ secure_dir }}/vars/common/common.yml" - - "{{ secure_dir }}/vars/stage/stage-edx.yml" - roles: - - common - - gh_users - - oraclejdk - - elasticsearch diff --git a/playbooks/edx-east/files b/playbooks/edx-east/files deleted file mode 120000 index feb122881ce..00000000000 --- a/playbooks/edx-east/files +++ /dev/null @@ -1 +0,0 @@ -../files \ No newline at end of file diff --git a/playbooks/edx-east/forum.yml b/playbooks/edx-east/forum.yml deleted file mode 100644 index 2573aa73fb2..00000000000 --- a/playbooks/edx-east/forum.yml +++ /dev/null @@ -1,20 +0,0 @@ -- name: Deploy forum - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - enable_newrelic: True - roles: - - aws - - role: nginx - nginx_sites: - - forum - - forum - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder - - role: newrelic - when: enable_newrelic diff --git a/playbooks/edx-east/gerrit_deploy.yml b/playbooks/edx-east/gerrit_deploy.yml deleted file mode 100644 index 8bdd13bc968..00000000000 --- a/playbooks/edx-east/gerrit_deploy.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# Deploys gerrit on to a server. -# -# Usage: -# ansible-playbook gerrit_deploy.yml -i gerrit_inventory.ini -e "secure_dir=/path/to/secure/dir" - -- name: Install and configure gerrit - hosts: gerrit - sudo: True - gather_facts: True - vars_files: - - "{{ secure_dir }}/vars/gerrit.yml" - pre_tasks: - - name: update apt - apt: update_cache=yes - roles: - - gerrit diff --git a/playbooks/edx-east/group_vars b/playbooks/edx-east/group_vars deleted file mode 120000 index cc7e7a90f91..00000000000 --- a/playbooks/edx-east/group_vars +++ /dev/null @@ -1 +0,0 @@ -../group_vars \ No newline at end of file diff --git a/playbooks/edx-east/inventory.ini b/playbooks/edx-east/inventory.ini deleted file mode 120000 index b1341f25766..00000000000 --- a/playbooks/edx-east/inventory.ini +++ /dev/null @@ -1 +0,0 @@ -../inventory.ini \ No newline at end of file diff --git a/playbooks/edx-east/jenkins_master.yml b/playbooks/edx-east/jenkins_master.yml deleted file mode 100644 index cc4e9b03108..00000000000 --- a/playbooks/edx-east/jenkins_master.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Configure a Jenkins master instance -# This has the Jenkins Java app, but none of the requirements -# to run the tests. - -- name: Configure instance(s) - hosts: jenkins_master - sudo: True - gather_facts: True - vars: - COMMON_DATA_DIR: "/mnt" - roles: - - common - - gh_users - - jenkins_master diff --git a/playbooks/edx-east/jenkins_worker.yml b/playbooks/edx-east/jenkins_worker.yml deleted file mode 100644 index 33e7e0dd049..00000000000 --- a/playbooks/edx-east/jenkins_worker.yml +++ /dev/null @@ -1,16 +0,0 @@ -# Configure a Jenkins worker instance -# This has all the requirements to run test jobs, -# but not the Jenkins Java app. 
- -- name: Configure instance(s) - hosts: jenkins_worker - sudo: True - gather_facts: True - vars: - mongo_enable_journal: False - roles: - - common - - edxlocal - - mongo - - browsers - - jenkins_worker diff --git a/playbooks/edx-east/legacy_ora.yml b/playbooks/edx-east/legacy_ora.yml deleted file mode 100644 index ea40f7998b6..00000000000 --- a/playbooks/edx-east/legacy_ora.yml +++ /dev/null @@ -1,11 +0,0 @@ -# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENV_TYPE=stage secure_dir=/path/to/secure/dir" -- name: Deploy legacy_ora - hosts: all - sudo: True - gather_facts: True - vars: - ora_app_dir: '/opt/wwc' - ora_user: 'www-data' - serial: 1 - roles: - - legacy_ora diff --git a/playbooks/edx-east/library b/playbooks/edx-east/library deleted file mode 120000 index 53bed9684d0..00000000000 --- a/playbooks/edx-east/library +++ /dev/null @@ -1 +0,0 @@ -../library \ No newline at end of file diff --git a/playbooks/edx-east/mlapi_prod.yml b/playbooks/edx-east/mlapi_prod.yml deleted file mode 100644 index 5a267876c5b..00000000000 --- a/playbooks/edx-east/mlapi_prod.yml +++ /dev/null @@ -1,19 +0,0 @@ -- hosts: - - tag_Group_mlapi_prod - vars_files: - - "{{ secure_dir }}/vars/mlapi_prod_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/mlapi_prod_users.yml" - roles: - - discern - sudo: True -- hosts: - - tag_Group_mlapi-bastion_prod - - tag_Group_mlapi-rabbitmq_prod - vars_files: - - "{{ secure_dir }}/vars/mlapi_prod_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/mlapi_prod_users.yml" - roles: - - common - sudo: True diff --git a/playbooks/edx-east/mlapi_sandbox.yml b/playbooks/edx-east/mlapi_sandbox.yml deleted file mode 100644 index 4efd16d55e3..00000000000 --- a/playbooks/edx-east/mlapi_sandbox.yml +++ /dev/null @@ -1,20 +0,0 @@ -- hosts: - - tag_Group_mlapi_sandbox - vars_files: - - "{{ secure_dir }}/vars/mlapi_sandbox_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/mlapi_sandbox_users.yml" - roles: - - discern - sudo: True -- hosts: - - tag_Group_mlapi-bastion_sandbox - - tag_Group_mlapi-rabbitmq_sandbox - vars_files: - - "{{ secure_dir }}/vars/mlapi_sandbox_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/mlapi_sandbox_users.yml" - roles: - - common - sudo: True - diff --git a/playbooks/edx-east/mlapi_stage.yml b/playbooks/edx-east/mlapi_stage.yml deleted file mode 100644 index 18d80c2d6fb..00000000000 --- a/playbooks/edx-east/mlapi_stage.yml +++ /dev/null @@ -1,19 +0,0 @@ -- hosts: - - tag_Group_mlapi_stage - vars_files: - - "{{ secure_dir }}/vars/mlapi_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/mlapi_stage_users.yml" - roles: - - discern - sudo: True -- hosts: - - tag_Group_mlapi-bastion_stage - - tag_Group_mlapi-rabbitmq_stage - vars_files: - - "{{ secure_dir }}/vars/mlapi_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/mlapi_stage_users.yml" - roles: - - common - sudo: True diff --git a/playbooks/edx-east/ora.yml b/playbooks/edx-east/ora.yml deleted file mode 100644 index 3e74011fde7..00000000000 --- a/playbooks/edx-east/ora.yml +++ /dev/null @@ -1,9 +0,0 @@ -- name: Deploy ora - hosts: all - sudo: True - gather_facts: True - roles: - - role: nginx - nginx_sites: - - ora - - ora diff --git a/playbooks/edx-east/rabbitmq.yml b/playbooks/edx-east/rabbitmq.yml deleted file mode 100644 index 2c58a09b7d5..00000000000 --- a/playbooks/edx-east/rabbitmq.yml +++ 
/dev/null @@ -1,7 +0,0 @@ -- name: Deploy rabbitmq - hosts: all - sudo: True - gather_facts: False - roles: - - aws - - rabbitmq diff --git a/playbooks/edx-east/restart_supervisor.yml b/playbooks/edx-east/restart_supervisor.yml deleted file mode 100644 index 93d08a560a3..00000000000 --- a/playbooks/edx-east/restart_supervisor.yml +++ /dev/null @@ -1,12 +0,0 @@ -- name: restarts supervisor - hosts: all - sudo: True - gather_facts: False - vars_files: - - roles/common/defaults/main.yml - - roles/supervisor/defaults/main.yml - tasks: - - name: supervisor | restart supervisor - service: > - name={{ supervisor_service }} - state=restarted diff --git a/playbooks/edx-east/secure_example b/playbooks/edx-east/secure_example deleted file mode 120000 index 776016b059f..00000000000 --- a/playbooks/edx-east/secure_example +++ /dev/null @@ -1 +0,0 @@ -../secure_example/ \ No newline at end of file diff --git a/playbooks/edx-east/stop_all_edx_services.yml b/playbooks/edx-east/stop_all_edx_services.yml deleted file mode 100644 index b7228d7693c..00000000000 --- a/playbooks/edx-east/stop_all_edx_services.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Stop all services - hosts: all - sudo: True - gather_facts: False - roles: - - stop_all_edx_services diff --git a/playbooks/edx-east/worker.yml b/playbooks/edx-east/worker.yml deleted file mode 100644 index 2d50c1afea4..00000000000 --- a/playbooks/edx-east/worker.yml +++ /dev/null @@ -1,15 +0,0 @@ -- name: Deploy worker - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - aws - - role: edxapp - celery_worker: True - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/xqueue.yml b/playbooks/edx-east/xqueue.yml deleted file mode 100644 index 6496425549f..00000000000 --- a/playbooks/edx-east/xqueue.yml +++ /dev/null @@ -1,17 +0,0 @@ -- name: Deploy xqueue - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - aws - - role: nginx - nginx_sites: - - xqueue - - role: xqueue - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-east/xserver.yml b/playbooks/edx-east/xserver.yml deleted file mode 100644 index d658fe95315..00000000000 --- a/playbooks/edx-east/xserver.yml +++ /dev/null @@ -1,17 +0,0 @@ -- name: Deploy xserver - hosts: all - sudo: True - gather_facts: True - vars: - enable_datadog: True - enable_splunkforwarder: True - roles: - - aws - - role: nginx - nginx_sites: - - xserver - - role: xserver - - role: datadog - when: enable_datadog - - role: splunkforwarder - when: enable_splunkforwarder diff --git a/playbooks/edx-monitoring.yml b/playbooks/edx-monitoring.yml new file mode 100644 index 00000000000..feab7229089 --- /dev/null +++ b/playbooks/edx-monitoring.yml @@ -0,0 +1,22 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - role: python + tags: + - install + - install:system-requirements + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + roles: + - graphite + - grafana + - role: nginx + nginx_sites: + - graphite + - grafana diff --git a/playbooks/edx-west/.gitignore b/playbooks/edx-west/.gitignore deleted file mode 100644 index 689b0c53021..00000000000 --- a/playbooks/edx-west/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -configuration-secure -edx-secret diff --git a/playbooks/edx-west/README.md 
b/playbooks/edx-west/README.md deleted file mode 100644 index 25c589136bc..00000000000 --- a/playbooks/edx-west/README.md +++ /dev/null @@ -1,47 +0,0 @@ -Readme ------- - -This directory has the live playbooks that we use here at Stanford to -maintain our instance of Open edX at [class.stanford.edu][c]. We check -it in to this public repo since we think that others might benefit from -seeing how we are configured. - - [c]: https://class.stanford.edu/ - -That said, we haven't documented things in here well, so we have no -expectation that others will be able to make enough sense of this to -give us useful contributions back. Generally a PR affecting files in -here will be ignored / rejected. - -This README is a convenient place to keep commands. But it is -a public repo, so we shouldn't store anything confidential in here. - -Other install docs: - -- Giulio's install doc [here][1]. - - [1]: https://docs.google.com/document/d/1ZDx51Jxa-zffyeKvHmTp_tIskLW9D9NRg9NytPTbnrA/edit#heading=h.iggugvghbcpf - - -Ansible Commands - Prod ----------------------- - -Generally we do installs as the "ubuntu" user. Make sure -that the stanford-deploy-20130415 ssh key is in your ssh agent. - - ANSIBLE_EC2_INI=ec2.ini ansible-playbook prod-log.yml -u ubuntu -c ssh -i ./ec2.py - - -Ansible Commands - Stage ------------------------- - -First, verify that you're doing something reasonable: - - ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py --list-hosts - -Then run it for real: - - ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py - - - diff --git a/playbooks/edx-west/ansible.cfg b/playbooks/edx-west/ansible.cfg deleted file mode 120000 index 61278e866ea..00000000000 --- a/playbooks/edx-west/ansible.cfg +++ /dev/null @@ -1 +0,0 @@ -../ansible.cfg \ No newline at end of file diff --git a/playbooks/edx-west/carnegie-prod-app.yml b/playbooks/edx-west/carnegie-prod-app.yml deleted file mode 100644 index 326a9e35c36..00000000000 --- a/playbooks/edx-west/carnegie-prod-app.yml +++ /dev/null @@ -1,26 +0,0 @@ -- hosts: ~tag_Name_app(10|20)_carn - sudo: True - vars_prompt: - - name: "migrate_db" - prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)" - default: "no" - private: no - vars: - secure_dir: '../../../configuration-secure/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - #local_dir: '../../../edx-secret/ansible/local' - local_dir: "{{secure_dir}}/local" - # this toggles http basic auth on and off;
false in production - not_prod: false - vars_files: - "{{ secure_dir }}/vars/edxapp_carnegie_vars.yml" - "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - roles: - - common - - supervisor - - {'role': 'nginx', 'nginx_conf': true} - - {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'carnegie'} - # run this role last - # - in_production diff --git a/playbooks/edx-west/carnegie-prod-worker.yml b/playbooks/edx-west/carnegie-prod-worker.yml deleted file mode 100644 index a099682c7e4..00000000000 --- a/playbooks/edx-west/carnegie-prod-worker.yml +++ /dev/null @@ -1,19 +0,0 @@ -# this gets all running prod webservers -- hosts: tag_environment_prod_carn:&tag_function_util -# or we can get subsets of them by name -#- hosts: ~tag_Name_util(10)_carn - sudo: True - vars: - secure_dir: '../../../edx-secret/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../../../../edx-secret/ansible/local' - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/edxapp_carnegie_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - roles: - - common - - supervisor - - { role: 'edxapp', celery_worker: True } diff --git a/playbooks/edx-west/cloudformation.yml b/playbooks/edx-west/cloudformation.yml deleted file mode 100644 index 0fe9f50e482..00000000000 --- a/playbooks/edx-west/cloudformation.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# This playbook demonstrates how to use the ansible cloudformation module to launch an AWS CloudFormation stack. -# -# This module requires that the boto python library is installed, and that you have your AWS credentials -# in $HOME/.boto - -#The idea here is to bring up bare infrastructure with CloudFormation, then use ansible to configure it. -#I generally do this in two separate playbook runs to allow the ec2.py inventory to be updated in between. - -#This module also uses "complex arguments", introduced in ansible 1.1, which allow you to specify the -#CloudFormation template parameters - -#This example launches a 3-node AutoScale group, with a security group, and an InstanceProfile with root permissions. - -#If a stack does not exist, it will be created. If it does exist and the template file has changed, the stack will be updated. -#If the parameters are different, the stack will also be updated. - -#CloudFormation stacks can take a while to provision; if you are curious about their status, use the AWS -#web console or one of the CloudFormation CLIs. - -#Example update: first launch the stack with 3 as the ClusterSize. After it is launched, change it to 4 -#and run the playbook again. - -- name: provision stack - hosts: localhost - connection: local - gather_facts: false - - # Launch the cloudformation-example.json template. Register the output.
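#
# As a sketch of how this playbook would be driven (the extra-vars values are
# placeholders modeled on the extra_vars default in stage-ansible.cfg below,
# not a recorded command), the template parameters are passed as extra vars:
#
#   ansible-playbook cloudformation.yml -e 'key=deployment name=edx-stage group=edx-stage region=us-west-1'
#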
- - tasks: - - name: edX configuration - cloudformation: > - stack_name="$name" state=present - region=$region disable_rollback=false - template=../cloudformation_templates/edx-server-multi-instance.json - args: - template_parameters: - KeyName: $key - InstanceType: m1.small - GroupTag: $group - register: stack - - name: show stack outputs - debug: msg="My stack outputs are ${stack.stack_outputs}" diff --git a/playbooks/edx-west/cme-prod-app.yml b/playbooks/edx-west/cme-prod-app.yml deleted file mode 100644 index a725cf4ecc5..00000000000 --- a/playbooks/edx-west/cme-prod-app.yml +++ /dev/null @@ -1,46 +0,0 @@ - -# set up the fireball transport -#- hosts: ~tag_Name_app(10|20)_cme -# gather_facts: no -# connection: ssh # or paramiko -# sudo: yes -# tasks: -# - apt: pkg=gcc state=present -# - apt: pkg=libzmq-dev,python-zmq state=present -# - action: fireball - - -# this gets all running prod webservers -#- hosts: tag_environment_prod:&tag_function_webserver -# or we can get subsets of them by name -- hosts: ~tag_Name_app(10|20)_cme - sudo: True - vars_prompt: - - name: "migrate_db" - prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)" - default: "no" - private: no - vars: - secure_dir: '../../../edx-secret/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../../../../edx-secret/ansible/local' - not_prod: true - vars_files: - - "{{ secure_dir }}/vars/edxapp_cme_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - roles: - - common - - supervisor - - role: nginx - nginx_conf: true - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'} - # run this role last - # - in_production diff --git a/playbooks/edx-west/cme-prod-worker.yml b/playbooks/edx-west/cme-prod-worker.yml deleted file mode 100644 index af873230c59..00000000000 --- a/playbooks/edx-west/cme-prod-worker.yml +++ /dev/null @@ -1,19 +0,0 @@ -# this gets all running prod webservers -- hosts: tag_environment_prod_cme:&tag_function_util -# or we can get subsets of them by name -#- hosts: ~tag_Name_util(10)_cme - sudo: True - vars: - secure_dir: '../../../edx-secret/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../../../../edx-secret/ansible/local' - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/edxapp_cme_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - roles: - - common - - supervisor - - { role: 'edxapp', celery_worker: True } diff --git a/playbooks/edx-west/ec2.ini b/playbooks/edx-west/ec2.ini deleted file mode 100644 index 9a2814ffeaa..00000000000 --- a/playbooks/edx-west/ec2.ini +++ /dev/null @@ -1,8 +0,0 @@ -[ec2] -regions=us-west-1 -regions_exclude = us-gov-west-1 -destination_variable=public_dns_name -vpc_destination_variable=private_dns_name -cache_path=/tmp -cache_max_age=300 -route53=False diff --git a/playbooks/edx-west/ec2.py b/playbooks/edx-west/ec2.py deleted file mode 120000 index 5c6f177c184..00000000000 --- a/playbooks/edx-west/ec2.py +++ /dev/null @@ -1 +0,0 @@ -../ec2.py \ No newline at end of file diff --git a/playbooks/edx-west/edxapp_rolling_example.yml b/playbooks/edx-west/edxapp_rolling_example.yml deleted file mode 100644 index 44edb3b3637..00000000000 --- 
a/playbooks/edx-west/edxapp_rolling_example.yml +++ /dev/null @@ -1,34 +0,0 @@ -# ansible-playbook -v --user=ubuntu edxapp_rolling_example.yml -i ./ec2.py --private-key=/path/to/deployment.pem - -- hosts: tag_Group_anothermulti - serial: 2 - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - pre_tasks: - - name: Gathering ec2 facts - ec2_facts: - - name: Removing instance from the ELB - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - state: 'absent' - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - edxapp - - ruby - post_tasks: - - name: Adding instance back to the ELB - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ ec2_elbs }}" - state: 'present' diff --git a/playbooks/edx-west/files b/playbooks/edx-west/files deleted file mode 120000 index feb122881ce..00000000000 --- a/playbooks/edx-west/files +++ /dev/null @@ -1 +0,0 @@ -../files \ No newline at end of file diff --git a/playbooks/edx-west/group_vars b/playbooks/edx-west/group_vars deleted file mode 120000 index cc7e7a90f91..00000000000 --- a/playbooks/edx-west/group_vars +++ /dev/null @@ -1 +0,0 @@ -../group_vars \ No newline at end of file diff --git a/playbooks/edx-west/prod-app.yml b/playbooks/edx-west/prod-app.yml deleted file mode 100644 index 5968db9d4f7..00000000000 --- a/playbooks/edx-west/prod-app.yml +++ /dev/null @@ -1,41 +0,0 @@ -# this gets all running prod webservers -#- hosts: tag_environment_prod:&tag_function_webserver -# or we can get subsets of them by name -#- hosts: ~tag_Name_app(10|20)_prod -- hosts: ~tag_Name_app(11|21)_prod -## this is the test box -#- hosts: ~tag_Name_app4_prod -## you can also do security group, but don't do that -#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB - sudo: True - vars_prompt: - - name: "migrate_db" - prompt: "Should this playbook run database migrations? 
(Type 'yes' to run, anything else to skip migrations)" - default: "no" - private: no - vars: - secure_dir: '../../../configuration-secure/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../configuration-secure/ansible/local' - not_prod: false - vars_files: - - "{{ secure_dir }}/vars/edxapp_prod_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - - "{{ secure_dir }}/vars/shib_prod_vars.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - edxapp - - apache - - shibboleth - # run this role last - - in_production diff --git a/playbooks/edx-west/prod-jumpbox.yml b/playbooks/edx-west/prod-jumpbox.yml deleted file mode 100644 index fad8c08ec0d..00000000000 --- a/playbooks/edx-west/prod-jumpbox.yml +++ /dev/null @@ -1,25 +0,0 @@ -- hosts: tag_Name_jumpbox_prod - sudo: True - vars_files: - - "{{ secure_dir }}/vars/users_jumpbox.yml" - vars: - secure_dir: '../../../configuration-secure/ansible' - local_dir: '../../../configuration-secure/ansible/local' - roles: - - common - - supervisor - - role: gh_users - gh_users: - - sefk - - jbau - - jrbl - - ali123 - - caesar2164 - - dcadams - - nparlante - gh_users_no_sudo: - - jinpa - - gbruhns - - paepcke - - akshayak - tags: users diff --git a/playbooks/edx-west/prod-log.yml b/playbooks/edx-west/prod-log.yml deleted file mode 100644 index 3f721f2e5c3..00000000000 --- a/playbooks/edx-west/prod-log.yml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: tag_Name_log10_prod - sudo: True - vars_files: - - "{{ secure_dir }}/vars/users.yml" - vars: - secure_dir: '../../../configuration-secure/ansible' - local_dir: '../../../configuration-secure/ansible/local' - roles: - - common diff --git a/playbooks/edx-west/prod-ora.yml b/playbooks/edx-west/prod-ora.yml deleted file mode 100644 index 46445a23eee..00000000000 --- a/playbooks/edx-west/prod-ora.yml +++ /dev/null @@ -1,25 +0,0 @@ -# this gets all running prod webservers -#- hosts: tag_environment_prod:&tag_function_ora -# or we can get subsets of them by name -#- hosts: ~tag_Name_ora(10|11)_prod -- hosts: ~tag_Name_ora10_prod -#- hosts: ~tag_Name_ora11_prod -#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB - sudo: True - vars: - secure_dir: '../../../configuration-secure/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../configuration-secure/ansible/local' - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/ora_prod_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - ora - - ora diff --git a/playbooks/edx-west/prod-worker.yml b/playbooks/edx-west/prod-worker.yml deleted file mode 100644 index 6cb1cda8b4f..00000000000 --- a/playbooks/edx-west/prod-worker.yml +++ /dev/null @@ -1,40 +0,0 @@ -# For all util machines -- hosts: tag_environment_prod:&tag_function_util -# or we can get subsets of them by name -#- hosts: ~tag_Name_util(1|2)_prod - sudo: True - vars: - secure_dir: '../../../configuration-secure/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../configuration-secure/ansible/local' - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/edxapp_prod_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - 
"{{ secure_dir }}/vars/edxapp_prod_users.yml" - - "{{ secure_dir }}/vars/shib_prod_vars.yml" - roles: - - common - - supervisor - - { role: 'edxapp', celery_worker: True } - -# -# COMMENT OUT THE NOTIFIER UNTIL IT IS READY -# - -# run the notifier on the first util machine only -#- hosts: ~tag_Name_util10_prod -# sudo: True -# vars: -# secure_dir: '../../../configuration-secure/ansible' -# migrate_db: "no" -# vars_files: -# - "{{ secure_dir }}/vars/edxapp_prod_vars.yml" -# - "{{ secure_dir }}/vars/notifier_prod_vars.yml" -# roles: -# - role: virtualenv -# virtualenv_user: "notifier" -# virtualenv_user_home: "/opt/wwc/notifier" -# virtualenv_name: "notifier" -# - notifier diff --git a/playbooks/edx-west/prod-xqueue.yml b/playbooks/edx-west/prod-xqueue.yml deleted file mode 100644 index 391b3fe0535..00000000000 --- a/playbooks/edx-west/prod-xqueue.yml +++ /dev/null @@ -1,22 +0,0 @@ -# this gets all running prod webservers -- hosts: tag_environment_prod:&tag_function_xqueue -# or we can get subsets of them by name -#- hosts: ~tag_Name_xserver(1|2)_prod -#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB - sudo: True - vars: - secure_dir: '../../../configuration-secure/ansible' - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: '../../../configuration-secure/ansible/local' - vars_files: - - "{{ secure_dir }}/vars/xqueue_prod_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_prod_users.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - xqueue - - xqueue diff --git a/playbooks/edx-west/roles b/playbooks/edx-west/roles deleted file mode 120000 index d8c4472ca1b..00000000000 --- a/playbooks/edx-west/roles +++ /dev/null @@ -1 +0,0 @@ -../roles \ No newline at end of file diff --git a/playbooks/edx-west/secure_example b/playbooks/edx-west/secure_example deleted file mode 120000 index 776016b059f..00000000000 --- a/playbooks/edx-west/secure_example +++ /dev/null @@ -1 +0,0 @@ -../secure_example/ \ No newline at end of file diff --git a/playbooks/edx-west/stage-all.yml b/playbooks/edx-west/stage-all.yml deleted file mode 100644 index c2197497345..00000000000 --- a/playbooks/edx-west/stage-all.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- hosts: tag_environment_stage:!tag_function_nat - # exclude nat host b/c we can't log in anyway - #- hosts: tag_Name_bastion_stage - sudo: True - vars: - secure_dir: ../../../edx-secret/ansible - local_dir: ../../../edx-secret/ansible/local - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - roles: - - common - - supervisor - - datadog - diff --git a/playbooks/edx-west/stage-ansible.cfg b/playbooks/edx-west/stage-ansible.cfg deleted file mode 100644 index 022db13a4bf..00000000000 --- a/playbooks/edx-west/stage-ansible.cfg +++ /dev/null @@ -1,20 +0,0 @@ -# ansible reads $ANSIBLE_CONFIG, ansible.cfg, ~/.ansible.cfg or /etc/ansible/ansible.cfg - -[defaults] -# Always have these for using the configuration repo -jinja2_extensions=jinja2.ext.do -hash_behaviour=merge - -# These are environment-specific defaults -forks=10 -#forks=1 -log_path=~/stage-edx-ansible.log -transport=ssh -hostfile=./ec2.py -extra_vars='key=deployment name=edx-stage group=edx-stage region=us-west-1' -user=ubuntu - -[ssh_connection] -# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg 
-ssh_args=-F stage-ssh-config -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r -scp_if_ssh=True diff --git a/playbooks/edx-west/stage-app.yml b/playbooks/edx-west/stage-app.yml deleted file mode 100644 index df5ae21064e..00000000000 --- a/playbooks/edx-west/stage-app.yml +++ /dev/null @@ -1,33 +0,0 @@ -- hosts: tag_environment_stage:&tag_function_webserver -#- hosts: tag_Name_app1_stage - sudo: True - vars_prompt: - - name: "migrate_db" - prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)" - default: "no" - private: no - vars: - not_prod: true - secure_dir: ../../../edx-secret/ansible - local_dir: ../../../edx-secret/ansible/local - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - - "{{ secure_dir }}/vars/shib_stage_vars.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - edxapp - - apache - - shibboleth - - datadog - #- splunkforwarder diff --git a/playbooks/edx-west/stage-debug.yml b/playbooks/edx-west/stage-debug.yml deleted file mode 100644 index 51f30dc7119..00000000000 --- a/playbooks/edx-west/stage-debug.yml +++ /dev/null @@ -1,27 +0,0 @@ -- hosts: localhost -#- hosts: tag_Name_app1_stage - vars: - migrate_db: "no" - not_prod: true - secure_dir: ../../../edx-secret/ansible - local_dir: ../../../edx-secret/ansible/local - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - #- "{{ secure_dir }}/vars/shib_stage_vars.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - lms - - cms - - lms-preview - nginx_default_sites: - - lms - - edxapp - - ansible_debug - #- apache - #- shibboleth diff --git a/playbooks/edx-west/stage-jumpbox.yml b/playbooks/edx-west/stage-jumpbox.yml deleted file mode 100644 index 55e8513ec89..00000000000 --- a/playbooks/edx-west/stage-jumpbox.yml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: tag_Name_jumpbox_stage - sudo: True - vars_files: - - "{{ secure_dir }}/vars/users_jumpbox.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - vars: - secure_dir: '../../../configuration-secure/ansible' - local_dir: '../../../configuration-secure/ansible/local' - roles: - - common - - datadog diff --git a/playbooks/edx-west/stage-log.yml b/playbooks/edx-west/stage-log.yml deleted file mode 100644 index 432354c321d..00000000000 --- a/playbooks/edx-west/stage-log.yml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: tag_Name_log10_stage - sudo: True - vars_files: - - "{{ secure_dir }}/vars/users_jumpbox.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - vars: - secure_dir: '../../../configuration-secure/ansible' - local_dir: '../../../configuration-secure/ansible/local' - roles: - - common - - datadog diff --git a/playbooks/edx-west/stage-notifier-only.yml b/playbooks/edx-west/stage-notifier-only.yml deleted file mode 100644 index c1ae5031830..00000000000 --- a/playbooks/edx-west/stage-notifier-only.yml +++ /dev/null @@ -1,16 +0,0 @@ -# run the notifier on the first util machine only -- hosts: ~tag_Name_util10_stage - sudo: True - vars: - secure_dir: '../../../configuration-secure/ansible' - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir 
}}/vars/notifier_stage_vars.yml" - roles: - - supervisor - - role: virtualenv - virtualenv_user: "notifier" - virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier diff --git a/playbooks/edx-west/stage-ora.yml b/playbooks/edx-west/stage-ora.yml deleted file mode 100644 index 85c5bf0357f..00000000000 --- a/playbooks/edx-west/stage-ora.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- hosts: tag_environment_stage:&tag_function_ora - sudo: True - vars: - secure_dir: ../../../configuration-secure/ansible - local_dir: ../../../configuration-secure/ansible/local - migrate_db: "yes" - vars_files: - - "{{ secure_dir }}/vars/ora_stage_vars.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - ora - - ora - - datadog - #- splunkforwarder diff --git a/playbooks/edx-west/stage-rabbit.yml b/playbooks/edx-west/stage-rabbit.yml deleted file mode 100644 index e8e64b94946..00000000000 --- a/playbooks/edx-west/stage-rabbit.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- hosts: tag_environment_stage:&tag_group_rabbitmq - sudo: True - vars: - secure_dir: ../../../edx-secret/ansible - local_dir: ../../../edx-secret/ansible/local - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - roles: - - common - - supervisor - - rabbitmq - -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_edxapp -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - nginx -# - edxapp -# - { role: 'edxapp', celery_worker: True } -# -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_xserver -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - nginx -# - xserver -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_rabbitmq -# serial: 1 -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - rabbitmq -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_xqueue -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - nginx -# - xqueue diff --git a/playbooks/edx-west/stage-ssh-config b/playbooks/edx-west/stage-ssh-config deleted file mode 100644 index 48a0f26e4aa..00000000000 --- a/playbooks/edx-west/stage-ssh-config +++ /dev/null @@ -1,20 +0,0 @@ -#### edx-stage VPC - -Host 54.241.183.3 -#Host vpc-jumpbox - HostName 54.241.183.3 - User ubuntu - ForwardAgent yes - -Host *.us-west-1.compute.internal - User ubuntu - ForwardAgent yes - ProxyCommand ssh -W %h:%p ubuntu@54.241.183.3 - -Host * - ForwardAgent yes - SendEnv LANG LC_* - HashKnownHosts yes - GSSAPIAuthentication yes - GSSAPIDelegateCredentials no - diff --git a/playbooks/edx-west/stage-worker.yml b/playbooks/edx-west/stage-worker.yml deleted file mode 100644 index d3b553bb28c..00000000000 --- a/playbooks/edx-west/stage-worker.yml +++ /dev/null @@ -1,38 +0,0 @@ - # this gets all running stage util machiens -- hosts: tag_environment_stage:&tag_function_util -# or we can get subsets of them by name -#- hosts: ~tag_Name_util(1|2)_stage - sudo: True - vars: - secure_dir: 
../../../edx-secret/ansible - # this indicates the path to site-specific (with precedence) - # things like nginx template files - local_dir: ../../../edx-secret/ansible/local - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - roles: - - common - - supervisor - - { role: 'edxapp', celery_worker: True } - - datadog - #- splunkforwarder - -# run the notifier on the first util machine only -- hosts: ~tag_Name_util10_stage - sudo: True - vars: - secure_dir: '../../../configuration-secure/ansible' - migrate_db: "no" - vars_files: - - "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - - "{{ secure_dir }}/vars/notifier_stage_vars.yml" - roles: - - role: virtualenv - virtualenv_user: "notifier" - virtualenv_user_home: "/opt/wwc/notifier" - virtualenv_name: "notifier" - - notifier diff --git a/playbooks/edx-west/stage-xqueue.yml b/playbooks/edx-west/stage-xqueue.yml deleted file mode 100644 index d5ad0893d46..00000000000 --- a/playbooks/edx-west/stage-xqueue.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- hosts: tag_environment_stage:&tag_group_xqueue - sudo: True - vars: - secure_dir: ../../../edx-secret/ansible - local_dir: ../../../edx-secret/ansible/local - vars_files: - - "{{ secure_dir }}/vars/xqueue_stage_vars.yml" - - "{{ secure_dir }}/vars/users.yml" - - "{{ secure_dir }}/vars/edxapp_stage_users.yml" - - "{{ secure_dir }}/vars/datadog_stage.yml" - roles: - - common - - supervisor - - role: nginx - nginx_sites: - - xqueue - - xqueue - - datadog - #- splunkforwarder - -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_edxapp -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - nginx -# - edxapp -# - { role: 'edxapp', celery_worker: True } -# -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_xserver -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - nginx -# - xserver -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_rabbitmq -# serial: 1 -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - rabbitmq -#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_xqueue -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/edx_continuous_integration_vars.yml" -# - "{{ secure_dir }}/vars/users.yml" -# roles: -# - common -# - nginx -# - xqueue diff --git a/playbooks/edx_ansible.yml b/playbooks/edx_ansible.yml new file mode 100644 index 00000000000..f29e7354497 --- /dev/null +++ b/playbooks/edx_ansible.yml @@ -0,0 +1,10 @@ +- name: Install edx_ansible + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - edx_ansible diff --git a/playbooks/edx_continuous_integration.yml b/playbooks/edx_continuous_integration.yml new file mode 100644 index 00000000000..fdc34ee3841 --- /dev/null +++ b/playbooks/edx_continuous_integration.yml @@ -0,0 +1,92 @@ +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + serial: 10 + vars: + migrate_db: "yes" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - cms + - lms + - xqueue + - 
learner_portal + - program_console + - prospectus + - edx_exams + - subscriptions + nginx_default_sites: + - lms + - docker-tools + - mysql + - role: edxlocal + tags: edxlocal + - memcache + - role: mongo_5_0 + when: MONGO_5_0_ENABLED + - role: mongo_6_0 + when: MONGO_6_0_ENABLED + - role: mongo_7_0 + when: MONGO_7_0_ENABLED + - role: redis + - { role: "edxapp", celery_worker: True, when: edxapp_containerized is defined and not edxapp_containerized } + - { role: "edxapp", when: edxapp_containerized is defined and not edxapp_containerized } + - { role: "testcourses", when: edxapp_containerized is defined and not edxapp_containerized } + - oraclejdk + - elasticsearch + - opensearch + - forum + - { role: "xqueue", update_users: True } + - edx_ansible + - analytics_api + - ecommerce + - credentials + - discovery + - role: registrar + when: REGISTRAR_ENABLED + - role: license_manager + when: LICENSE_MANAGER_ENABLED + - role: commerce_coordinator + when: COMMERCE_COORDINATOR_ENABLED + - role: enterprise_catalog + when: ENTERPRISE_CATALOG_ENABLED + - role: learner_portal + when: LEARNER_PORTAL_ENABLED + - role: program_console + when: PROGRAM_CONSOLE_ENABLED + - role: prospectus + when: PROSPECTUS_ENABLED + - role: authn + when: AUTHN_ENABLED + - role: payment + MFE_NAME: payment + when: PAYMENT_MFE_ENABLED + - role: mfe + MFE_NAME: learning + when: LEARNING_MFE_ENABLED + - role: course_authoring + MFE_NAME: course-authoring + when: COURSE_AUTHORING_MFE_ENABLED + - role: mfe + MFE_NAME: library-authoring + when: LIBRARY_AUTHORING_MFE_ENABLED + - role: mfe + MFE_NAME: ora-grading + when: ORA_GRADING_MFE_ENABLED + - role: mfe + MFE_NAME: profile + when: PROFILE_MFE_ENABLED + - role: mfe + MFE_NAME: learner-dashboard + when: LEARNER_DASHBOARD_MFE_ENABLED + - { role: oauth_client_setup, when: edxapp_containerized is defined and not edxapp_containerized } + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - flower diff --git a/playbooks/edx_jenkins_tests.yml b/playbooks/edx_jenkins_tests.yml new file mode 100644 index 00000000000..50a62fb5865 --- /dev/null +++ b/playbooks/edx_jenkins_tests.yml @@ -0,0 +1,31 @@ +- name: Configure instance(s) + hosts: jenkins_test + become: True + gather_facts: True + vars: + migrate_db: "yes" + ansible_ssh_private_key_file: /var/lib/jenkins/continuous-integration.pem + mysql5_workaround: True + vars_files: + - "{{ secure_dir }}/vars/edxapp_ref_users.yml" + - "{{ secure_dir }}/vars/edxapp_sandbox.yml" + # overrides specific to the jenkins test playbook + - "{{ secure_dir }}/vars/edx_jenkins_tests.yml" + roles: + - common + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - lms + - cms + - xqueue + nginx_default_sites: + - lms + - mysql + - edxlocal + - memcache + - mongo + - edxapp + - xqueue + - rabbitmq diff --git a/playbooks/edx_maintenance.yml b/playbooks/edx_maintenance.yml new file mode 100644 index 00000000000..89dc9f996af --- /dev/null +++ b/playbooks/edx_maintenance.yml @@ -0,0 +1,20 @@ +# Usage: +# +# By default this playbook will disable the maintenance mode +# +# Enable maintenance +# ansible-playbook ./edx_maintenance.yml -i host1.example.com,host2.example.com, -e '{"ENABLE_MAINTENANCE": True}' +# +# Disable maintenance +# ansible-playbook ./edx_maintenance.yml -i host1.example.com,host2.example.com, -e '{"ENABLE_MAINTENANCE": False}' +# ansible-playbook ./edx_maintenance.yml -i 
host1.example.com,host2.example.com, +# +- name: Deploy edxapp + hosts: all + become: True + gather_facts: True + vars_files: + - 'roles/nginx/defaults/main.yml' + - 'roles/supervisor/defaults/main.yml' + roles: + - role: edx_maintenance diff --git a/playbooks/edx_mirror.yml b/playbooks/edx_mirror.yml new file mode 100644 index 00000000000..7f2ed9ee3ad --- /dev/null +++ b/playbooks/edx_mirror.yml @@ -0,0 +1,13 @@ +# ansible-playbook --limit tag_Name_mirror edx_mirror.yml --user ubuntu -i ec2.py +- name: Configure instance(s) + hosts: all + become: True + gather_facts: False + roles: + - role: nginx + nginx_sites: + - gh_mirror + tags: ['r_nginx'] + - role: gh_mirror + tags: ['r_gh_mirror'] + diff --git a/playbooks/edx_provision.yml b/playbooks/edx_provision.yml new file mode 100644 index 00000000000..8b80c25ae08 --- /dev/null +++ b/playbooks/edx_provision.yml @@ -0,0 +1,105 @@ +- name: Create ec2 instance + hosts: localhost + connection: local + gather_facts: True + vars: + keypair: continuous-integration + instance_type: t2.medium + security_group: sandbox-vpc + # ubuntu 16.04 - 20170721 + ami: ami-cd0f5cb6 + region: us-east-1 + zone: us-east-1c + instance_tags: + environment: sandbox + github_username: temp + Name: sandbox-temp + source: provisioning-script + owner: temp + root_ebs_size: 50 + dns_name: temp + instance_initiated_shutdown_behavior: stop + dns_zone: sandbox.edx.org + name_tag: sandbox-temp + elb: false + ec2_vpc_subnet_id: subnet-cd867aba + instance_userdata: | + #!/bin/bash + set -x + set -e + export RUN_ANSIBLE=false; + wget https://raw.githubusercontent.com/edx/configuration/{{ CONFIGURATION_VERSION }}/util/install/ansible-bootstrap.sh -O - | bash; + launch_wait_time: 5 + roles: + - role: launch_ec2 + keypair: "{{ keypair }}" + instance_type: "{{ instance_type }}" + instance_initiated_shutdown_behavior: "{{ instance_initiated_shutdown_behavior }}" + security_group: "{{ security_group }}" + ami: "{{ ami }}" + region: "{{ region }}" + instance_tags: "{{ instance_tags }}" + root_ebs_size: "{{ root_ebs_size }}" + dns_name: "{{ dns_name }}" + dns_zone: "{{ dns_zone }}" + zone: "{{ zone }}" + vpc_subnet_id: "{{ ec2_vpc_subnet_id }}" + assign_public_ip: yes + terminate_instance: true + instance_profile_name: sandbox + user_data: "{{ instance_userdata }}" + launch_ec2_wait_time: "{{ launch_wait_time }}" + +- name: Configure instance(s) + hosts: launched + become: True + gather_facts: False + vars: + elb: False + pre_tasks: + - name: Wait for cloud-init to finish + wait_for: + path: /var/log/cloud-init.log + timeout: 15 + search_regex: "final-message" + - name: gather_facts + setup: "" + vars_files: + - roles/edxapp/defaults/main.yml + - roles/xqueue/defaults/main.yml + - roles/forum/defaults/main.yml + roles: + # rerun common to set the hostname, nginx to set basic auth + - common + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - edx-sandbox + - role: nginx + nginx_sites: + - cms + - lms + - xqueue + - forum + nginx_default_sites: + - lms + - docker-tools + post_tasks: + - name: get instance id for elb registration + local_action: + module: ec2_lookup + region: us-east-1 + tags: + - Name: "{{ name_tag }}" + register: ec2_info + when: elb + become: False + - name: register instance into an elb if one was provided + local_action: + module: ec2_elb + region: "{{ region }}" + instance_id: "{{ ec2_info.instance_ids[0] }}" + state: present + ec2_elbs: + - "{{ elb }}" + when: elb + become: False diff --git a/playbooks/edx_sandbox.yml b/playbooks/edx_sandbox.yml deleted file mode 100644 
index 9144613e358..00000000000 --- a/playbooks/edx_sandbox.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- - -# Example sandbox configuration -# for single server community -# installs - -- name: Configure instance(s) - hosts: all - sudo: True - gather_facts: True - vars: - migrate_db: "yes" - openid_workaround: True - EDXAPP_LMS_NGINX_PORT: '80' - edx_platform_version: 'master' - roles: - - role: nginx - nginx_sites: - - cms - - lms - - forum - - ora - - xqueue - nginx_default_sites: - - lms - - edxlocal - - mongo - - { role: 'edxapp', celery_worker: True } - - edxapp - - demo - - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - - oraclejdk - - elasticsearch - - forum - - { role: "xqueue", update_users: True } - - ora - - discern - - edx_ansible diff --git a/playbooks/edxapp.yml b/playbooks/edxapp.yml new file mode 100644 index 00000000000..2364f0f585a --- /dev/null +++ b/playbooks/edxapp.yml @@ -0,0 +1,52 @@ +- name: Deploy edxapp + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + CLUSTER_NAME: 'edxapp' + NGINX_ENABLE_SSL: False + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: automated + AUTOMATED_USERS: "{{ EDXAPP_AUTOMATED_USERS | default({}) }}" + - role: nginx + nginx_sites: + - lms + - cms + nginx_default_sites: "{{ EDXAPP_NGINX_DEFAULT_SITES }}" + nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}" + nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}" + nginx_skip_enable_sites: "{{ EDXAPP_NGINX_SKIP_ENABLE_SITES }}" + - edxapp + - role: sqlite_fix + when: devstack is defined and devstack + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: minos + when: COMMON_ENABLE_MINOS + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + when: "EDXAPP_HERMES_ENABLED" + HERMES_JITTER: 600 + HERMES_ALLOWED_SUDO_COMMANDS: + - "/bin/cp {{ hermes_download_dir }}/lms.yml {{ COMMON_CFG_DIR }}/lms.yml" + - "/bin/cp {{ hermes_download_dir }}/studio.yml {{ COMMON_CFG_DIR }}/studio.yml" + - "/edx/app/edxapp/reload_lms_config.sh" + - "/edx/app/edxapp/reload_cms_config.sh" + HERMES_SERVICE_CONFIG: + - url: '{{ HERMES_REMOTE_FILE_LOCATION }}/{{ COMMON_ENVIRONMENT }}/lms.yml' + filename: '{{ hermes_download_dir }}/lms.yml' + command: "sudo /bin/cp {{ hermes_download_dir }}/lms.yml {{ COMMON_CFG_DIR }}/lms.yml && sudo /edx/app/edxapp/reload_lms_config.sh" + secret_key_files: "{{ HERMES_PRIVATE_KEYS_DICT | map('regex_replace','^(.*)$','/edx/app/hermes/hermes-\\1') | join(',') if HERMES_PRIVATE_KEYS_DICT is defined else None }}" + - url: '{{ HERMES_REMOTE_FILE_LOCATION }}/{{ COMMON_ENVIRONMENT }}/studio.yml' + filename: '{{ hermes_download_dir }}/studio.yml' + command: "sudo /bin/cp {{ hermes_download_dir }}/studio.yml {{ COMMON_CFG_DIR }}/studio.yml && sudo /edx/app/edxapp/reload_cms_config.sh" + secret_key_files: "{{ HERMES_PRIVATE_KEYS_DICT | map('regex_replace','^(.*)$','/edx/app/hermes/hermes-\\1') | join(',') if HERMES_PRIVATE_KEYS_DICT is defined else None }}" diff --git a/playbooks/elasticsearch.yml b/playbooks/elasticsearch.yml new file mode 100644 index 00000000000..e5c3ff6954d --- /dev/null +++ b/playbooks/elasticsearch.yml @@ -0,0 +1,45 @@ +- hosts: all + become: True + vars: + # By default take instances in and out of the elb(s) they + # are attached to + # To skip elb operations use "-e elb_pre_post=false" + # Number of instances to
operate on at a time + serial_count: 1 + CLUSTER_NAME: "commoncluster" + serial: "{{ serial_count }}" + pre_tasks: + - action: ec2_metadata_facts + when: elb_pre_post + - debug: + var: ansible_ec2_instance_id + when: elb_pre_post + - name: Instance De-register + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + region: us-east-1 + state: absent + wait_timeout: 60 + become: False + when: elb_pre_post + roles: + - common + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - elasticsearch + post_tasks: + - debug: + var: ansible_ec2_instance_id + when: elb_pre_post + - name: Register instance in the elb + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ ec2_elbs }}" + region: us-east-1 + state: present + wait_timeout: 60 + become: False + when: elb_pre_post diff --git a/playbooks/enterprise_catalog.yml b/playbooks/enterprise_catalog.yml new file mode 100644 index 00000000000..c773022b4dd --- /dev/null +++ b/playbooks/enterprise_catalog.yml @@ -0,0 +1,21 @@ +- name: Deploy edX designer + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'enterprise_catalog' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - enterprise_catalog + - enterprise_catalog + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'enterprise_catalog' + when: ENTERPRISE_CATALOG_HERMES_ENABLED diff --git a/playbooks/files/edx-server-ubuntu-configuration.json b/playbooks/files/edx-server-ubuntu-configuration.json deleted file mode 100644 index 4dc5b720c6e..00000000000 --- a/playbooks/files/edx-server-ubuntu-configuration.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "AWSTemplateFormatVersion": "2010-09-09", - - "Description": "Sample template to bring up an Edx Server. A WaitCondition is used to hold up the stack creation until the application is deployed. **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters": { - - "NameTag": { - "Type": "String", - "Description": "Name Tag" - }, - "GroupTag": { - "Type": "String", - "Description": "Group Tag" - }, - "KeyName": { - "Type": "String", - "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server" - }, - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - "SSHLocation" : { - "Description" : "The IP address range that can be used to SSH to the EC2 instances", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 
- } - }, - - "Mappings" : { - - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "64" }, - "m1.small" : { "Arch" : "64" }, - "m1.medium" : { "Arch" : "64" }, - "m1.large" : { "Arch" : "64" }, - "m1.xlarge" : { "Arch" : "64" }, - "m2.xlarge" : { "Arch" : "64" }, - "m2.2xlarge" : { "Arch" : "64" }, - "m2.4xlarge" : { "Arch" : "64" }, - "m3.xlarge" : { "Arch" : "64" }, - "m3.2xlarge" : { "Arch" : "64" }, - "c1.medium" : { "Arch" : "64" }, - "c1.xlarge" : { "Arch" : "64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "32" : "ami-def89fb7", "64" : "ami-d0f89fb9" }, - "us-west-1" : { "32" : "ami-fc002cb9", "64" : "ami-fe002cbb" }, - "us-west-2" : { "32" : "ami-0ef96e3e", "64" : "ami-70f96e40" }, - "eu-west-1" : { "32" : "ami-c27b6fb6", "64" : "ami-ce7b6fba" }, - "sa-east-1" : { "32" : "ami-a1da00bc", "64" : "ami-a3da00be" }, - "ap-southeast-1" : { "32" : "ami-66084734", "64" : "ami-64084736" }, - "ap-southeast-2" : { "32" : "ami-06ea7a3c", "64" : "ami-04ea7a3e" }, - "ap-northeast-1" : { "32" : "ami-fc6ceefd", "64" : "ami-fe6ceeff" } - } - }, - - "Resources" : { - - "EdxServerUser" : { - "Type" : "AWS::IAM::User", - "Properties" : { - "Path": "/", - "Policies": [{ - "PolicyName": "root", - "PolicyDocument": { "Statement":[{ - "Effect":"Allow", - "Action": [ - "cloudformation:DescribeStackResource", - "s3:Put" - ], - "Resource":"*" - }]} - }] - } - }, - - "HostKeys" : { - "Type" : "AWS::IAM::AccessKey", - "Properties" : { - "UserName" : {"Ref": "EdxServerUser"} - } - }, - - "EdxServer": { - "Type": "AWS::EC2::Instance", - "Metadata" : { - "AWS::CloudFormation::Init" : { - "config" : { - "packages" : { - "apt" : { - "ruby" : [], - "ruby-dev" : [], - "libopenssl-ruby" : [], - "rdoc" : [], - "ri" : [], - "irb" : [], - "build-essential" : [], - "wget" : [], - "ssl-cert" : [], - "rubygems" : [], - "git" : [], - "s3cmd" : [] - } - }, - "files" : { - "/home/ubuntu/.s3cfg" : { - "content" : { "Fn::Join" : ["", [ - "[default]\n", - "access_key = ", { "Ref" : "HostKeys" }, "\n", - "secret_key = ", {"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]}, "\n", - "use_https = True\n" - ]]}, - "mode" : "000644", - "owner" : "ubuntu", - "group" : "ubuntu" - } - } - } - } - }, - "Properties": { - "Tags" : [ { - "Key" : "Name", - "Value" :{ "Ref": "NameTag" } - }, - { - "Key" : "Group", - "Value" : { "Ref": "GroupTag" } - } - ], - "SecurityGroups": [ { "Ref": "EdxServerSecurityGroup" } ], - "ImageId": { "Fn::FindInMap": [ "AWSRegionArch2AMI", { "Ref": "AWS::Region" }, { "Fn::FindInMap": [ "AWSInstanceType2Arch", { "Ref": "InstanceType" }, "Arch" ] } ] - }, - "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ - "#!/bin/bash\n", - "function error_exit\n", - "{\n", - " cfn-signal -e 1 -r \"$1\" '", { "Ref" : "EdxServerWaitHandle" }, "'\n", - " exit 1\n", - "}\n", - - "apt-get update\n", - "apt-get -y install python-setuptools\n", - "echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n", - "easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n", - "echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n", - "cfn-init --region ", { "Ref" : "AWS::Region" }, - " -s ", { "Ref" : "AWS::StackId" }, " -r EdxServer ", - " --access-key ", { "Ref" : "HostKeys" }, - " --secret-key ", {"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]}, " || error_exit 'Failed to run cfn-init'\n", - "echo \"cfn-init run \" - `date` >> /home/ubuntu/cflog.txt\n", - "# If all went well, signal success\n", - "cfn-signal -e $? 
-r 'Edx Server configuration' '", { "Ref" : "EdxServerWaitHandle" }, "'\n" - ]]}}, - "KeyName": { "Ref": "KeyName" }, - "InstanceType": { "Ref": "InstanceType" } - } - }, - - "EdxServerSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Open up SSH access plus Edx Server required ports", - "SecurityGroupIngress" : [ - { "IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": { "Ref" : "SSHLocation"} }, - { "IpProtocol": "tcp", "FromPort": "4000", "ToPort": "4000", "SourceSecurityGroupName": { "Ref" :"EdxClientSecurityGroup" }}, - { "IpProtocol": "tcp", "FromPort": "4040", "ToPort": "4040", "CidrIp": "0.0.0.0/0"} - ] - } - }, - - "EdxClientSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Group with access to Edx Server" - } - }, - - "EdxServerWaitHandle" : { - "Type" : "AWS::CloudFormation::WaitConditionHandle" - }, - - "EdxServerWaitCondition" : { - "Type" : "AWS::CloudFormation::WaitCondition", - "DependsOn" : "EdxServer", - "Properties" : { - "Handle" : { "Ref" : "EdxServerWaitHandle" }, - "Timeout" : "1200" - } - } - }, - - "Outputs" : { - "EdxSecurityGroup" : { - "Description" : "EC2 Security Group with access to the Edx server", - "Value" : { "Ref" :"EdxClientSecurityGroup" } - } - } -} diff --git a/playbooks/files/examples/EC2_Instance_With_Block_Device_Mapping.json b/playbooks/files/examples/EC2_Instance_With_Block_Device_Mapping.json deleted file mode 100644 index b0ddb6951a3..00000000000 --- a/playbooks/files/examples/EC2_Instance_With_Block_Device_Mapping.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "AWSTemplateFormatVersion" : "2010-09-09", - - "Description" : "AWS CloudFormation Sample Template EC2_Instance_With_Block_Device_Mapping: Example to show how to attach EBS volumes and modify the root device using EC2 block device mappings. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters" : { - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m3.xlarge","m3.2xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge","hi1.4xlarge","hs1.8xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - - "KeyName" : { - "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server", - "Type" : "String" - }, - - "SSHFrom": { - "Description": "Lockdown SSH access to the bastion host (default can be accessed from anywhere)", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid CIDR range of the form x.x.x.x/x." 
- } - }, - - "Mappings" : { - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "PV64" }, - - "m1.small" : { "Arch" : "PV64" }, - "m1.medium" : { "Arch" : "PV64" }, - "m1.large" : { "Arch" : "PV64" }, - "m1.xlarge" : { "Arch" : "PV64" }, - - "m3.xlarge" : { "Arch" : "PV64" }, - "m3.2xlarge" : { "Arch" : "PV64" }, - - "m2.xlarge" : { "Arch" : "PV64" }, - "m2.2xlarge" : { "Arch" : "PV64" }, - "m2.4xlarge" : { "Arch" : "PV64" }, - - "c1.medium" : { "Arch" : "PV64" }, - "c1.xlarge" : { "Arch" : "PV64" }, - - "cc1.4xlarge" : { "Arch" : "CLU64" }, - "cc2.8xlarge" : { "Arch" : "CLU64" }, - - "cg1.4xlarge" : { "Arch" : "GPU64" }, - - "hi1.4xlarge" : { "Arch" : "PV64" }, - - "hs1.8xlarge" : { "Arch" : "PV64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "PV64" : "ami-3c994355", "CLU64" : "ami-08249861", "GPU64" : "ami-02f54a6b" }, - "us-west-2" : { "PV64" : "ami-20800c10", "CLU64" : "ami-2431bf14", "GPU64" : "NOT_YET_SUPPORTED" }, - "us-west-1" : { "PV64" : "ami-87712ac2", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "eu-west-1" : { "PV64" : "ami-c37474b7", "CLU64" : "ami-d97474ad", "GPU64" : "ami-1b02026f" }, - "ap-southeast-1" : { "PV64" : "ami-a6a7e7f4", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-southeast-2" : { "PV64" : "ami-bd990e87", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-northeast-1" : { "PV64" : "ami-4e6cd34f", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "sa-east-1" : { "PV64" : "ami-1e08d103", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" } - } - }, - - "Resources" : { - "Ec2Instance" : { - "Type" : "AWS::EC2::Instance", - "Properties" : { - "ImageId" : { "Fn::FindInMap" : [ "AWSRegionArch2AMI", { "Ref" : "AWS::Region" }, - { "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : "InstanceType" }, "Arch" ] } ] }, - "KeyName" : { "Ref" : "KeyName" }, - "InstanceType" : { "Ref" : "InstanceType" }, - "SecurityGroups" : [{ "Ref" : "Ec2SecurityGroup" }], - "BlockDeviceMappings" : [ - { - "DeviceName" : "/dev/sda1", - "Ebs" : { "VolumeSize" : "50" } - },{ - "DeviceName" : "/dev/sdm", - "Ebs" : { "VolumeSize" : "100" } - } - ] - } - }, - - "Ec2SecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "HTTP and SSH access", - "SecurityGroupIngress" : [ { - "IpProtocol" : "tcp", - "FromPort" : "22", "ToPort" : "22", - "CidrIp" : { "Ref" : "SSHFrom" } - } ] - } - } - }, - - "Outputs" : { - "Instance" : { - "Value" : { "Fn::GetAtt" : [ "Ec2Instance", "PublicDnsName" ] }, - "Description" : "DNS Name of the newly created EC2 instance" - } - } -} diff --git a/playbooks/files/examples/ElastiCache.json b/playbooks/files/examples/ElastiCache.json deleted file mode 100644 index 868a9092c67..00000000000 --- a/playbooks/files/examples/ElastiCache.json +++ /dev/null @@ -1,235 +0,0 @@ -{ - "AWSTemplateFormatVersion" : "2010-09-09", - - "Description" : "AWS CloudFormation Sample Template ElastiCache: Sample template showing how to create an Amazon ElastiCache Cache Cluster with Auto Discovery and access it from a very simple PHP application. **WARNING** This template creates an Amazon Ec2 Instance and an Amazon ElastiCache Cluster. 
You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters" : { - - "KeyName" : { - "Description" : "Name of an existing Amazon EC2 KeyPair for SSH access to the Web Server", - "Type" : "String" - }, - - "InstanceType" : { - "Description" : "WebServer EC2 instance type", - "Type" : "String", - "Default" : "m1.small", - "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge", "m3.xlarge", "m3.2xlarge", "m2.xlarge","m2.2xlarge","m2.4xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge", "hi1.4xlarge", "hs1.8xlarge"], - "ConstraintDescription" : "must be a valid EC2 instance type." - }, - - "CacheNodeType" : { - "Default" : "cache.m1.small", - "Description" : "The compute and memory capacity of the nodes in the Cache Cluster", - "Type" : "String", - "AllowedValues" : [ "cache.m1.small", "cache.m1.large", "cache.m1.xlarge", "cache.m2.xlarge", "cache.m2.2xlarge", "cache.m2.4xlarge", "cache.c1.xlarge" ], - "ConstraintDescription" : "must select a valid Cache Node type." - }, - - "NumberOfCacheNodes" : { - "Default": "1", - "Description" : "The number of Cache Nodes the Cache Cluster should have", - "Type": "Number", - "MinValue": "1", - "MaxValue": "10", - "ConstraintDescription" : "must be between 5 and 10." - }, - "SSHLocation" : { - "Description" : "The IP address range that can be used to SSH to the EC2 instances", - "Type": "String", - "MinLength": "9", - "MaxLength": "18", - "Default": "0.0.0.0/0", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." - } - }, - - "Mappings" : { - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "PV64" }, - "m1.small" : { "Arch" : "PV64" }, - "m1.medium" : { "Arch" : "PV64" }, - "m1.large" : { "Arch" : "PV64" }, - "m1.xlarge" : { "Arch" : "PV64" }, - "m3.xlarge" : { "Arch" : "PV64" }, - "m3.2xlarge" : { "Arch" : "PV64" }, - "m2.xlarge" : { "Arch" : "PV64" }, - "m2.2xlarge" : { "Arch" : "PV64" }, - "m2.4xlarge" : { "Arch" : "PV64" }, - "c1.medium" : { "Arch" : "PV64" }, - "c1.xlarge" : { "Arch" : "PV64" }, - "cc1.4xlarge" : { "Arch" : "CLU64" }, - "cc2.8xlarge" : { "Arch" : "CLU64" }, - "cg1.4xlarge" : { "Arch" : "GPU64" }, - "hi1.4xlarge" : { "Arch" : "PV64" }, - "hs1.8xlarge" : { "Arch" : "PV64" } - }, - - "AWSRegionArch2AMI" : { - "us-east-1" : { "PV64" : "ami-1624987f", "CLU64" : "ami-08249861", "GPU64" : "ami-02f54a6b" }, - "us-west-2" : { "PV64" : "ami-2a31bf1a", "CLU64" : "ami-2431bf14", "GPU64" : "NOT_YET_SUPPORTED" }, - "us-west-1" : { "PV64" : "ami-1bf9de5e", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "eu-west-1" : { "PV64" : "ami-c37474b7", "CLU64" : "ami-d97474ad", "GPU64" : "ami-1b02026f" }, - "ap-southeast-1" : { "PV64" : "ami-a6a7e7f4", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-southeast-2" : { "PV64" : "ami-bd990e87", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "ap-northeast-1" : { "PV64" : "ami-4e6cd34f", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" }, - "sa-east-1" : { "PV64" : "ami-1e08d103", "CLU64" : "NOT_YET_SUPPORTED", "GPU64" : "NOT_YET_SUPPORTED" } - } - }, - - "Resources" : { - - "CacheCluster" : { - "Type": "AWS::ElastiCache::CacheCluster", - "Properties": { - "CacheNodeType" : { "Ref" : "CacheNodeType" }, - "CacheSecurityGroupNames" : [ { "Ref" : "CacheSecurityGroup" } ], - "Engine" : "memcached", - "NumCacheNodes" : { "Ref" : 
"NumberOfCacheNodes" } - } - }, - - "CacheSecurityGroup": { - "Type": "AWS::ElastiCache::SecurityGroup", - "Properties": { - "Description" : "Lock cache down to Web Server access only" - } - }, - - "CacheSecurityGroupIngress": { - "Type": "AWS::ElastiCache::SecurityGroupIngress", - "Properties": { - "CacheSecurityGroupName" : { "Ref" : "CacheSecurityGroup" }, - "EC2SecurityGroupName" : { "Ref" : "WebServerSecurityGroup" } - } - }, - - "WebServerSecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Enable HTTP and SSH access", - "SecurityGroupIngress" : [ - {"IpProtocol" : "tcp", "FromPort" : "22", "ToPort" : "22", "CidrIp" : { "Ref" : "SSHLocation"} }, - {"IpProtocol" : "tcp", "FromPort" : "80", "ToPort" : "80", "CidrIp" : "0.0.0.0/0"} - ] - } - }, - - "WebServerHost": { - "Type" : "AWS::EC2::Instance", - "Metadata" : { - "AWS::CloudFormation::Init" : { - "config" : { - "packages" : { - "yum" : { - "httpd" : [], - "gcc-c++" : [], - "php" : [], - "php-pear" : [] - } - }, - - "files" : { - "/var/www/html/index.php" : { - "content" : { "Fn::Join" : ["", [ - "AWS CloudFormation sample application for Amazon ElastiCache';\n", - "\n", - "$server_endpoint = '", { "Fn::GetAtt" : [ "CacheCluster", "ConfigurationEndpoint.Address" ]}, "';\n", - "$server_port = ", { "Fn::GetAtt" : [ "CacheCluster", "ConfigurationEndpoint.Port" ]}, ";\n", - "\n", - "/**\n", - " * The following will initialize a Memcached client to utilize the Auto Discovery feature.\n", - " * \n", - " * By configuring the client with the Dynamic client mode with single endpoint, the\n", - " * client will periodically use the configuration endpoint to retrieve the current cache\n", - " * cluster configuration. This allows scaling the cache cluster up or down in number of nodes\n", - " * without requiring any changes to the PHP application. \n", - " */\n", - "\n", - "$dynamic_client = new Memcached();\n", - "$dynamic_client->setOption(Memcached::OPT_CLIENT_MODE, Memcached::DYNAMIC_CLIENT_MODE);\n", - "$dynamic_client->addServer($server_endpoint, $server_port);\n", - "\n", - "$tmp_object = new stdClass;\n", - "$tmp_object->str_attr = 'test';\n", - "$tmp_object->int_attr = 123;\n", - "\n", - "$dynamic_client->set('key', $tmp_object, 10) or die ('Failed to save data to the cache');\n", - "echo '

Store data in the cache (data will expire in 10 seconds)

';\n", - "\n", - "$get_result = $dynamic_client->get('key');\n", - "echo '

Data from the cache:
';\n", - "\n", - "var_dump($get_result);\n", - "\n", - "echo '

';\n", - "?>\n" - ]]}, - "mode" : "000644", - "owner" : "apache", - "group" : "apache" - } - }, - - "commands" : { - "00_install_memcached_client" : { - "command" : "pecl install https://s3.amazonaws.com/elasticache-downloads/ClusterClient/PHP/latest-64bit" - }, - "01_enable_auto_discovery" : { - "command" : "echo 'extension=amazon-elasticache-cluster-client.so' > /etc/php.d/memcached.ini" - } - }, - - "services" : { - "sysvinit" : { - "httpd" : { "enabled" : "true", "ensureRunning" : "true" }, - "sendmail" : { "enabled" : "false", "ensureRunning" : "false" } - } - } - } - } - }, - "Properties": { - "ImageId" : { "Fn::FindInMap" : [ "AWSRegionArch2AMI", { "Ref" : "AWS::Region" }, - { "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : "InstanceType" }, "Arch" ]}]}, - "InstanceType" : { "Ref" : "InstanceType" }, - "SecurityGroups" : [ {"Ref" : "WebServerSecurityGroup"} ], - "KeyName" : { "Ref" : "KeyName" }, - "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ - "#!/bin/bash -v\n", - "yum update -y aws-cfn-bootstrap\n", - - "# Setup the PHP sample application\n", - "/opt/aws/bin/cfn-init ", - " --stack ", { "Ref" : "AWS::StackName" }, - " --resource WebServerHost ", - " --region ", { "Ref" : "AWS::Region" }, "\n", - - "# Signal the status of cfn-init\n", - "/opt/aws/bin/cfn-signal -e $? '", { "Ref" : "WebServerWaitHandle" }, "'\n" - ]]}} - } - }, - - "WebServerWaitHandle" : { - "Type" : "AWS::CloudFormation::WaitConditionHandle" - }, - - "WebServerWaitCondition" : { - "Type" : "AWS::CloudFormation::WaitCondition", - "DependsOn" : "WebServerHost", - "Properties" : { - "Handle" : {"Ref" : "WebServerWaitHandle"}, - "Timeout" : "300" - } - } - }, - "Outputs" : { - "WebsiteURL" : { - "Value" : { "Fn::Join" : ["", ["http://", { "Fn::GetAtt" : [ "WebServerHost", "PublicDnsName" ]} ]] }, - "Description" : "Application URL" - } - } -} diff --git a/playbooks/files/examples/RDS_MySQL_55_With_Tags.json b/playbooks/files/examples/RDS_MySQL_55_With_Tags.json deleted file mode 100644 index f9ab015205d..00000000000 --- a/playbooks/files/examples/RDS_MySQL_55_With_Tags.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "AWSTemplateFormatVersion" : "2010-09-09", - - "Description" : "AWS CloudFormation Sample Template RDS_MySQL_55_With_Tags: Sample template showing how to create an RDS DBInstance version 5.5 with tags and alarming on important metrics that indicate the health of the database **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", - - "Parameters": { - "DBName": { - "Default": "MyDatabase", - "Description" : "The database name", - "Type": "String", - "MinLength": "1", - "MaxLength": "64", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, - "DBUser": { - "NoEcho": "true", - "Description" : "The database admin account username", - "Type": "String", - "MinLength": "1", - "MaxLength": "16", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, - "DBPassword": { - "NoEcho": "true", - "Description" : "The database admin account password", - "Type": "String", - "MinLength": "1", - "MaxLength": "41", - "AllowedPattern" : "[a-zA-Z0-9]*", - "ConstraintDescription" : "must contain only alphanumeric characters." 
- }, - "DBAllocatedStorage": { - "Default": "5", - "Description" : "The size of the database (Gb)", - "Type": "Number", - "MinValue": "5", - "MaxValue": "1024", - "ConstraintDescription" : "must be between 5 and 1024Gb." - }, - "DBInstanceClass": { - "Default": "db.m1.small", - "Description" : "The database instance type", - "Type": "String", - "AllowedValues" : [ "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge" ], - "ConstraintDescription" : "must select a valid database instance type." - } - }, - - "Mappings" : { - "InstanceTypeMap" : { - "db.m1.small" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m1.large" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m1.xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m2.xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m2.2xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - }, - "db.m2.4xlarge" : { - "CPULimit" : "60", - "FreeStorageSpaceLimit" : "1024", - "ReadIOPSLimit" : "100", - "WriteIOPSLimit" : "100" - } - } - }, - - "Resources" : { - - "MyDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "DBName" : { "Ref" : "DBName" }, - "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Engine" : "MySQL", - "EngineVersion" : "5.5", - "MasterUsername" : { "Ref" : "DBUser" }, - "MasterUserPassword" : { "Ref" : "DBPassword" }, - "Tags" : [{ - "Key" : "Name", - "Value" : "My SQL Database" - }] - }, - "DeletionPolicy" : "Snapshot" - } - }, - - "Outputs" : { - "JDBCConnectionString": { - "Description" : "JDBC connection string for database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "MyDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "MyDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - }, - "DBAddress" : { - "Description" : "Address of database endpoint", - "Value" : { "Fn::GetAtt": [ "MyDB", "Endpoint.Address" ] } - }, - "DBPort" : { - "Description" : "Database endpoint port number", - "Value" : { "Fn::GetAtt": [ "MyDB", "Endpoint.Port" ] } - } - } -} diff --git a/playbooks/flower.yml b/playbooks/flower.yml new file mode 100644 index 00000000000..c84bc3f5b50 --- /dev/null +++ b/playbooks/flower.yml @@ -0,0 +1,11 @@ +- name: Deploy celery flower (monitoring tool) + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - flower diff --git a/playbooks/forum.yml b/playbooks/forum.yml new file mode 100644 index 00000000000..b3ee89ab9b7 --- /dev/null +++ b/playbooks/forum.yml @@ -0,0 +1,23 @@ +- name: Deploy forum + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + CLUSTER_NAME: 'forum' + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - forum + - forum + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git 
a/playbooks/ghost.yml b/playbooks/ghost.yml new file mode 100644 index 00000000000..e9f4398b636 --- /dev/null +++ b/playbooks/ghost.yml @@ -0,0 +1,11 @@ +- name: Install gh-ost + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - ghost + diff --git a/playbooks/go-agent-docker.yml b/playbooks/go-agent-docker.yml new file mode 100644 index 00000000000..89c4acad39b --- /dev/null +++ b/playbooks/go-agent-docker.yml @@ -0,0 +1,10 @@ +# ansible-playbook -i 'admin.edx.org,' ./hotg.yml -e@/path/to/ansible/vars/edx.yml -e@/path/to/secure/ansible/vars/edx_admin.yml + +- name: Install go-agent-docker-server + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - go-agent-docker-server diff --git a/playbooks/group_vars/README.md b/playbooks/group_vars/README.md deleted file mode 100644 index c68b41847d6..00000000000 --- a/playbooks/group_vars/README.md +++ /dev/null @@ -1,5 +0,0 @@ -After EC2 discovery variables in the files that match any -of the discovered groups will be set. - -For convenience a single variable is set -for every Group tag for conditional task execution. diff --git a/playbooks/group_vars/README.rst b/playbooks/group_vars/README.rst new file mode 100644 index 00000000000..295f1a7faaf --- /dev/null +++ b/playbooks/group_vars/README.rst @@ -0,0 +1,5 @@ +After EC2 discovery, variables in the files that match any of the discovered +groups will be set. + +For convenience, a single variable is set for every Group tag for conditional +task execution. diff --git a/playbooks/group_vars/all b/playbooks/group_vars/all deleted file mode 100644 index fce5d9a62d4..00000000000 --- a/playbooks/group_vars/all +++ /dev/null @@ -1,10 +0,0 @@ ---- -# these pathes are relative to the playbook dir -# directory for secret settings (keys, etc) -# - -secure_dir: 'path/to/secure_example' - -# this indicates the path to site-specific (with precedence) -# things like nginx template files -local_dir: 'path/to/ansible_local' diff --git a/playbooks/group_vars/tag_environment_prod b/playbooks/group_vars/tag_environment_prod index 784f8a3860a..e802cd09764 100644 --- a/playbooks/group_vars/tag_environment_prod +++ b/playbooks/group_vars/tag_environment_prod @@ -1,5 +1,3 @@ --- secure_dir: '../../configuration-secure/ansible' -# this indicates the path to site-specific (with precedence) -# things like nginx template files local_dir: '../../configuration-secure/ansible/local' diff --git a/playbooks/insights.yml b/playbooks/insights.yml new file mode 100644 index 00000000000..f780a5df4d6 --- /dev/null +++ b/playbooks/insights.yml @@ -0,0 +1,26 @@ +- name: Deploy Insights + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'insights' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - insights + - insights + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'insights' + when: INSIGHTS_HERMES_ENABLED diff --git a/playbooks/insightvm_agent.yml b/playbooks/insightvm_agent.yml new file mode 100644 index 00000000000..f5fe003b3d4 --- /dev/null +++ b/playbooks/insightvm_agent.yml @@ -0,0 +1,11 @@ +- name: Deploy insightvm_agent + hosts: all + become: True +
gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - insightvm_agent diff --git a/playbooks/jenkins_admin.yml b/playbooks/jenkins_admin.yml new file mode 100644 index 00000000000..72f99a46c3f --- /dev/null +++ b/playbooks/jenkins_admin.yml @@ -0,0 +1,27 @@ +# Configure an instance with the admin Jenkins. +- name: Install python2 + hosts: all + become: True + gather_facts: False + roles: + - python +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - jenkins_admin + # This requires an override of the following form: + # SPLUNKFORWARDER_LOG_ITEMS: + # - source: /edx/var/jenkins/jobs/*/builds/*/log + # index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-jenkins' + # sourcetype: jenkins_build + # followSymlink: false + # crcSalt: <SOURCE> + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/jenkins_data_engineering.yml b/playbooks/jenkins_data_engineering.yml new file mode 100644 index 00000000000..f78e1f1ee31 --- /dev/null +++ b/playbooks/jenkins_data_engineering.yml @@ -0,0 +1,37 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Mount EBS + hosts: all + become: True + vars: + volumes: "{{ JENKINS_VOLUMES }}" + roles: + - mount_ebs + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_ENABLE_SPLUNKFORWARDER: False + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - jenkins_data_engineering + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + tags: + - newreliconly + - role: aws_cloudwatch_agent + tags: + - cloudwatch diff --git a/playbooks/jenkins_data_engineering_new.yml b/playbooks/jenkins_data_engineering_new.yml new file mode 100644 index 00000000000..08e2fd3a28c --- /dev/null +++ b/playbooks/jenkins_data_engineering_new.yml @@ -0,0 +1,39 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Mount EBS + hosts: all + become: True + vars: + volumes: "{{ JENKINS_VOLUMES }}" + roles: + - mount_ebs + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_ENABLE_SPLUNKFORWARDER: False + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + ansible_distribution_release: focal + + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - docker-tools + - jenkins_data_engineering_new + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + tags: + - newreliconly + - role: aws_cloudwatch_agent + tags: + - cloudwatch diff --git a/playbooks/jenkins_it.yml b/playbooks/jenkins_it.yml new file mode 100644 index 00000000000..3a34b4a3669 --- /dev/null +++ b/playbooks/jenkins_it.yml @@ -0,0 +1,22 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - docker-tools + - jenkins_it diff --git
a/playbooks/learner_dashboard.yml b/playbooks/learner_dashboard.yml new file mode 100644 index 00000000000..e4d47f9d189 --- /dev/null +++ b/playbooks/learner_dashboard.yml @@ -0,0 +1,17 @@ +- name: Deploy Learner Dashboard Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'learner-dashboard' + LEARNER_DASHBOARD_MFE_ENABLED: True + LEARNER_DASHBOARD_MFE_SANDBOX_BUILD: False + roles: + - role: mfe + MFE_NAME: learner-dashboard + MFE_VERSION: '{{ LEARNER_DASHBOARD_MFE_VERSION }}' + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/learner_portal.yml b/playbooks/learner_portal.yml new file mode 100644 index 00000000000..cab41f71b73 --- /dev/null +++ b/playbooks/learner_portal.yml @@ -0,0 +1,21 @@ +- name: Deploy learner_portal Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'learner-portal' + LEARNER_PORTAL_ENABLED: True + LEARNER_PORTAL_SANDBOX_BUILD: False + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - learner_portal + LEARNER_PORTAL_NGINX_PORT: 8775 + - learner_portal + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/learning.yml b/playbooks/learning.yml new file mode 100644 index 00000000000..ca771b75019 --- /dev/null +++ b/playbooks/learning.yml @@ -0,0 +1,17 @@ +- name: Deploy learning MFE Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'learning' + MYMFE_ENABLED: True + MYMFE_SANDBOX_BUILD: False + roles: + - role: mfe + MFE_NAME: learning + MFE_VERSION: '{{ LEARNING_MFE_VERSION }}' + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/library/ec2_acl b/playbooks/library/ec2_acl new file mode 100644 index 00000000000..901196f449f --- /dev/null +++ b/playbooks/library/ec2_acl @@ -0,0 +1,286 @@ +#!/usr/bin/env python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import absolute_import +from __future__ import print_function +DOCUMENTATION = """ +--- +module: ec2_acl +short_description: Create or delete AWS Network ACLs. +description: + - Can create or delete AWS Network ACLs.
+version_added: "1.8" +author: Edward Zarecor +options: + state: + description: + - create, update or delete the acl + required: true + choices: ['present', 'absent'] + name: + description: + - Unique name for acl + required: true + vpc_id: + description: + - The VPC that this acl belongs to + required: true + default: null +extends_documentation_fragment: aws +""" + +EXAMPLES = ''' +- ec2_acl: + name: public-acls + state: present + vpc_id: 'vpc-abababab' + +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * +import sys +try: + import boto.vpc +except ImportError: + print("failed=True msg='boto required for this module'") + sys.exit(1) + +from boto.exception import NoAuthHandlerFound + +PROTOCOL_NUMBERS = {"ICMP": 1, "TCP": 6, "UDP": 17, "ALL": -1 } + + +class DuplicateAclError(Exception): + pass + + +class ACLManager(): + + def __init__(self, connection, vpc_id, acl_name, rules, tags=None): + self.connection = connection + self.vpc_id = vpc_id + self.acl_name = acl_name + self.rules = rules + self.tags = tags or [] + self.acl = None + + def get_acl(self): + + if not self.acl: + results = self.connection.get_all_network_acls(filters={"vpc_id": self.vpc_id, "tag:Name": self.acl_name}) + + if len(results) == 1: + self.acl = results[0] + elif len(results) > 1: + raise DuplicateAclError("Found multiple network acls named {0} in vpc with id {1}". + format(self.acl_name, self.vpc_id)) + else: + # Does not exist yet + pass + + return self.acl + + def create_acl(self): + self.acl = self.connection.create_network_acl(self.vpc_id) + changed = True + self.do_tags() + return changed + + def update_acl(self): + changed = False + self.update_rules() + self.do_tags() + return changed + + # TODO refactor out repetition + def update_rules(self): + + current_ingress = [x.rule_number for x in self.acl.network_acl_entries if x.egress == 'false'] + current_egress = [x.rule_number for x in self.acl.network_acl_entries if x.egress == 'true'] + + modified_ingress = [] + modified_egress = [] + + for rule in self.rules: + egress = True if rule['type'] == "egress" else False + protocol = PROTOCOL_NUMBERS[rule['protocol'].upper()] + + if not egress: + if rule['number'] not in current_ingress: + # new rule + self.connection.create_network_acl_entry( + self.acl.id, + rule['number'], + protocol, + rule['rule_action'], + rule['cidr_block'], + egress=egress, + port_range_from=rule['from_port'], + port_range_to=rule['to_port']) + else: + # blindly replace rather than attempting + # to determine if the entry has changed + modified_ingress.append(rule['number']) + self.connection.replace_network_acl_entry( + self.acl.id, + rule['number'], + protocol, + rule['rule_action'], + rule['cidr_block'], + egress=egress, + port_range_from=rule['from_port'], + port_range_to=rule['to_port']) + else: + if rule['number'] not in current_egress: + # new rule + self.connection.create_network_acl_entry( + self.acl.id, + rule['number'], + protocol, + rule['rule_action'], + rule['cidr_block'], + egress=egress, + port_range_from=rule['from_port'], + port_range_to=rule['to_port']) + else: + # blindly replace rather than attempting + # to determine if the entry has changed + modified_egress.append(rule['number']) + self.connection.replace_network_acl_entry( + self.acl.id, + rule['number'], + protocol, + rule['rule_action'], + rule['cidr_block'], + egress=egress, + port_range_from=rule['from_port'], + port_range_to=rule['to_port']) +
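+ # Worked example (illustrative only, not from the original module): if the + # ACL currently holds ingress rules numbered [100, 110, 120] and the + # requested rules cover only [100, 120], both requested numbers are + # replaced in place and recorded in modified_ingress, leaving 110 as the + # only defunct number to be deleted below. Entries numbered 32767 and + # above fall in the AWS-reserved range (the default deny rule) and are + # never removed.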
+ removed_ingress_rule_numbers = [c for c in current_ingress if c not in modified_ingress] + removed_egress_rule_numbers = [c for c in current_egress if c not in modified_egress] + + for number in removed_ingress_rule_numbers: + n = int(number) + # rule numbers 32767 and above are in the range reserved by AWS + if n < 32767: + self.connection.delete_network_acl_entry(self.acl.id, n, False) + + for number in removed_egress_rule_numbers: + n = int(number) + # rule numbers 32767 and above are in the range reserved by AWS + if n < 32767: + self.connection.delete_network_acl_entry(self.acl.id, n, True) + + + def create_rules(self): + if self.rules is None: + return + for rule in self.rules: + egress = True if rule['type'] == "egress" else False + protocol = PROTOCOL_NUMBERS[rule['protocol'].upper()] + self.connection.create_network_acl_entry( + self.acl.id, + rule['number'], + protocol, + rule['rule_action'], + rule['cidr_block'], + egress=egress, + port_range_from=rule['from_port'], + port_range_to=rule['to_port']) + + def do_tags(self): + + tags = {'Name': self.acl_name} + if self.tags: + for tag in self.tags: + tags[tag['key']] = tag['value'] + self.get_acl().add_tags(tags) + + def present(self): + + existing = self.get_acl() + + if not existing: + changed = self.create_acl() + self.create_rules() + else: + changed = self.update_acl() + + results = dict(changed=changed, + id=self.acl.id, + name=self.acl_name, + entries=self.rules) + + return results + + def absent(self): + acl = self.get_acl() + changed = False + + if acl: + changed = self.connection.delete_network_acl(acl.id) + results = dict(changed=changed, + id=acl.id if acl else None, + name=self.acl_name) + + return results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent']), + vpc_id=dict(required=True, type='str'), + rules=dict(type='list'), + tags=dict(type='list'), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + profile = module.params.get('profile') + if region: + try: + connection = boto.vpc.connect_to_region(region, profile_name=profile) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + vpc_id = module.params.get('vpc_id') + acl_name = module.params.get('name') + rules_in = module.params.get('rules') + tags = module.params.get('tags') + + manager = ACLManager(connection, vpc_id, acl_name, rules_in, tags) + + state = module.params.get('state') + + results = dict() + + if state == 'present': + results = manager.present() + elif state == 'absent': + results = manager.absent() + + module.exit_json(**results) + +main() diff --git a/playbooks/library/ec2_elb_local_1.3 b/playbooks/library/ec2_elb_local_1.3 deleted file mode 100644 index 23c04c774c1..00000000000 --- a/playbooks/library/ec2_elb_local_1.3 +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details.
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: ec2_elb -short_description: De-registers or registers instances from EC2 ELB(s) -description: - - This module de-registers or registers an AWS EC2 instance from the ELB(s) - that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - - Will be marked changed when called only if there are ELBs found to operate on. -version_added: "1.2" -requirements: [ "boto" ] -author: John Jarvis -options: - state: - description: - - register or deregister the instance - required: true - - instance_id: - description: - - EC2 Instance ID - required: true - - ec2_elbs: - description: - - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. - required: false - default: None - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - def2ault: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - wait: - description: - - Wait for instance registration or deregistration to complete successfully before returning. - required: false - default: yes - choices: [ "yes", "no" ] - -""" - -EXAMPLES = """ -# basic pre_task and post_task example -pre_tasks: - - name: Gathering ec2 facts - ec2_facts: - - name: Instance De-register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - state: 'absent' -roles: - - myrole -post_tasks: - - name: Instance Register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" - state: 'present' - with_items: ec2_elbs -""" - -import time -import sys -import os - -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - -try: - import boto - import boto.ec2.elb - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -class ElbManager: - """Handles EC2 instance ELB registration and de-registration""" - - def __init__(self, module, instance_id=None, ec2_elbs=None, - aws_access_key=None, aws_secret_key=None, region=None): - self.aws_access_key = aws_access_key - self.aws_secret_key = aws_secret_key - self.module = module - self.instance_id = instance_id - self.region = region - self.lbs = self._get_instance_lbs(ec2_elbs) - - # if there are no ELBs to operate on - # there will be no changes made - if len(self.lbs) > 0: - self.changed = True - else: - self.changed = False - - def deregister(self, wait): - """De-register the instance from all ELBs and wait for the ELB - to report it out-of-service""" - - for lb in self.lbs: - lb.deregister_instances([self.instance_id]) - if wait: - self._await_elb_instance_state(lb, 'OutOfService') - - def register(self, wait): - """Register the instance for all ELBs and wait for the ELB - to report the instance in-service""" - - for lb in 
self.lbs: - lb.register_instances([self.instance_id]) - if wait: - self._await_elb_instance_state(lb, 'InService') - - def exists(self, lbtest): - """ Verify that the named ELB actually exists """ - - found = False - for lb in self.lbs: - if lb.name == lbtest: - found=True - break - return found - - - def _await_elb_instance_state(self, lb, awaited_state): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" - - while True: - state = lb.get_instance_health([self.instance_id])[0].state - if state == awaited_state: - break - else: - time.sleep(1) - - def _get_instance_lbs(self, ec2_elbs=None): - """Returns a list of ELBs attached to self.instance_id - ec2_elbs: an optional list of elb names that will be used - for elb lookup instead of returning what elbs - are attached to self.instance_id""" - - try: - endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - - elbs = elb.get_all_load_balancers() - - if ec2_elbs: - lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) - return lbs - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state={'required': True, - 'choices': ['present', 'absent']}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type':'list'}, - aws_secret_key={'default': None, 'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True}, - aws_access_key={'default': None, 'aliases': ['ec2_access_key', 'access_key']}, - region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS}, - wait={'required': False, 'choices': BOOLEANS, 'default': True} - ) - ) - - aws_secret_key = module.params['aws_secret_key'] - aws_access_key = module.params['aws_access_key'] - ec2_elbs = module.params['ec2_elbs'] - region = module.params['region'] - wait = module.params['wait'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: - module.fail_json(msg="ELBs are required for registration") - - if not aws_secret_key: - if 'AWS_SECRET_KEY' in os.environ: - aws_secret_key = os.environ['AWS_SECRET_KEY'] - elif 'EC2_SECRET_KEY' in os.environ: - aws_secret_key = os.environ['EC2_SECRET_KEY'] - - if not aws_access_key: - if 'AWS_ACCESS_KEY' in os.environ: - aws_access_key = os.environ['AWS_ACCESS_KEY'] - elif 'EC2_ACCESS_KEY' in os.environ: - aws_access_key = os.environ['EC2_ACCESS_KEY'] - - if not region: - if 'AWS_REGION' in os.environ: - region = os.environ['AWS_REGION'] - elif 'EC2_REGION' in os.environ: - region = os.environ['EC2_REGION'] - - if not region: - module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set.")) - - instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key, - aws_secret_key, region=region) - - for elb in ec2_elbs: - if not elb_man.exists(elb): - msg="ELB %s does not exist" % elb - module.fail_json(msg=msg) - - if module.params['state'] == 'present': - elb_man.register(wait) - elif module.params['state'] == 'absent': - elb_man.deregister(wait) - - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, 
ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - -# this is magic, see lib/ansible/module_common.py -#<> - -main() diff --git a/playbooks/library/ec2_elb_local_1.5 b/playbooks/library/ec2_elb_local_1.5 deleted file mode 100644 index 987ae72333c..00000000000 --- a/playbooks/library/ec2_elb_local_1.5 +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: ec2_elb -short_description: De-registers or registers instances from EC2 ELBs -description: - - This module de-registers or registers an AWS EC2 instance from the ELBs - that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - - Will be marked changed when called only if there are ELBs found to operate on. -version_added: "1.2" -requirements: [ "boto" ] -author: John Jarvis -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - - instance_id: - description: - - EC2 Instance ID - required: true - - ec2_elbs: - description: - - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. - required: false - default: None - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - enable_availability_zone: - description: - - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. - required: false - default: yes - choices: [ "yes", "no" ] - wait: - description: - - Wait for instance registration or deregistration to complete successfully before returning. 
- required: false - default: yes - choices: [ "yes", "no" ] - -""" - -EXAMPLES = """ -# basic pre_task and post_task example -pre_tasks: - - name: Gathering ec2 facts - ec2_facts: - - name: Instance De-register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - state: 'absent' -roles: - - myrole -post_tasks: - - name: Instance Register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" - state: 'present' - with_items: ec2_elbs -""" - -import time -import sys -import os - -try: - import boto - import boto.ec2 - import boto.ec2.elb - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -class ElbManager: - """Handles EC2 instance ELB registration and de-registration""" - - def __init__(self, module, instance_id=None, ec2_elbs=None, - aws_access_key=None, aws_secret_key=None, region=None): - self.aws_access_key = aws_access_key - self.aws_secret_key = aws_secret_key - self.module = module - self.instance_id = instance_id - self.region = region - self.lbs = self._get_instance_lbs(ec2_elbs) - self.changed = False - - def deregister(self, wait): - """De-register the instance from all ELBs and wait for the ELB - to report it out-of-service""" - - for lb in self.lbs: - initial_state = self._get_instance_health(lb) if wait else None - - if initial_state and initial_state.state == 'InService': - lb.deregister_instances([self.instance_id]) - else: - return - - if wait: - self._await_elb_instance_state(lb, 'OutOfService', initial_state) - else: - # We cannot assume no change was made if we don't wait - # to find out - self.changed = True - - def register(self, wait, enable_availability_zone): - """Register the instance for all ELBs and wait for the ELB - to report the instance in-service""" - for lb in self.lbs: - if wait: - initial_state = self._get_instance_health(lb) - - if enable_availability_zone: - self._enable_availailability_zone(lb) - - lb.register_instances([self.instance_id]) - - if wait: - self._await_elb_instance_state(lb, 'InService', initial_state) - else: - # We cannot assume no change was made if we don't wait - # to find out - self.changed = True - - def exists(self, lbtest): - """ Verify that the named ELB actually exists """ - - found = False - for lb in self.lbs: - if lb.name == lbtest: - found=True - break - return found - - def _enable_availailability_zone(self, lb): - """Enable the current instance's availability zone in the provided lb. - Returns True if the zone was enabled or False if no change was made. - lb: load balancer""" - instance = self._get_instance() - if instance.placement in lb.availability_zones: - return False - - lb.enable_zones(zones=instance.placement) - - # If successful, the new zone will have been added to - # lb.availability_zones - return instance.placement in lb.availability_zones - - def _await_elb_instance_state(self, lb, awaited_state, initial_state): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" - while True: - instance_state = self._get_instance_health(lb) - - if not instance_state: - msg = ("The instance %s could not be put in service on %s." - " Reason: Invalid Instance") - self.module.fail_json(msg=msg % (self.instance_id, lb)) - - if instance_state.state == awaited_state: - # Check the current state agains the initial state, and only set - # changed if they are different. 
- if (initial_state is None) or (instance_state.state != initial_state.state): - self.changed = True - break - elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks andd continue waiting - pass - elif (awaited_state == 'InService' - and instance_state.reason_code == "Instance"): - # If the reason_code for the instance being out of service is - # "Instance" this indicates a failure state, e.g. the instance - # has failed a health check or the ELB does not have the - # instance's availabilty zone enabled. The exact reason why is - # described in InstantState.description. - msg = ("The instance %s could not be put in service on %s." - " Reason: %s") - self.module.fail_json(msg=msg % (self.instance_id, - lb, - instance_state.description)) - time.sleep(1) - - def _is_instance_state_pending(self, instance_state): - """ - Determines whether the instance_state is "pending", meaning there is - an operation under way to bring it in service. - """ - # This is messy, because AWS provides no way to distinguish between - # an instance that is is OutOfService because it's pending vs. OutOfService - # because it's failing health checks. So we're forced to analyze the - # description, which is likely to be brittle. - return (instance_state and 'pending' in instance_state.description) - - def _get_instance_health(self, lb): - """ - Check instance health, should return status object or None under - certain error conditions. - """ - try: - status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError, e: - if e.error_code == 'InvalidInstance': - return None - else: - raise - return status - - def _get_instance_lbs(self, ec2_elbs=None): - """Returns a list of ELBs attached to self.instance_id - ec2_elbs: an optional list of elb names that will be used - for elb lookup instead of returning what elbs - are attached to self.instance_id""" - - try: - endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - - elbs = elb.get_all_load_balancers() - - if ec2_elbs: - lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) - return lbs - - def _get_instance(self): - """Returns a boto.ec2.InstanceObject for self.instance_id""" - try: - endpoint = "ec2.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - ec2_conn = boto.ec2.EC2Connection(self.aws_access_key, self.aws_secret_key, region=connect_region) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - return ec2_conn.get_only_instances(instance_ids=[self.instance_id])[0] - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state={'required': True, - 'choices': ['present', 'absent']}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type':'list'}, - ec2_secret_key={'default': None, 'aliases': ['aws_secret_key', 'secret_key'], 'no_log': True}, - ec2_access_key={'default': None, 'aliases': ['aws_access_key', 'access_key']}, - region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region']}, - enable_availability_zone={'default': True, 'required': False, 'choices': BOOLEANS, 'type': 
'bool'}, - wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'} - ) - ) - - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - ec2_elbs = module.params['ec2_elbs'] - region = module.params['region'] - wait = module.params['wait'] - enable_availability_zone = module.params['enable_availability_zone'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: - module.fail_json(msg="ELBs are required for registration") - - instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key, - aws_secret_key, region=region) - - if ec2_elbs is not None: - for elb in ec2_elbs: - if not elb_man.exists(elb): - msg="ELB %s does not exist" % elb - module.fail_json(msg=msg) - - if module.params['state'] == 'present': - elb_man.register(wait, enable_availability_zone) - elif module.params['state'] == 'absent': - elb_man.deregister(wait) - - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/playbooks/library/ec2_group_local b/playbooks/library/ec2_group_local new file mode 100644 index 00000000000..4a66c41377c --- /dev/null +++ b/playbooks/library/ec2_group_local @@ -0,0 +1,413 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +from __future__ import absolute_import +from __future__ import print_function +import six +from functools import reduce +DOCUMENTATION = ''' +--- +module: ec2_group +version_added: "1.3" +short_description: maintain an ec2 VPC security group. +description: + - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5 +options: + name: + description: + - Name of the security group. + required: true + description: + description: + - Description of the security group. + required: true + vpc_id: + description: + - ID of the VPC to create the group in. + required: false + rules: + description: + - List of firewall inbound rules to enforce in this group (see example). + required: false + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). + required: false + version_added: "1.6" + tags: + description: + - List of tags to apply to this security group + required: false + version_added: "1.8" + region: + description: + - the EC2 region to use + required: false + default: null + aliases: [] + state: + version_added: "1.4" + description: + - create or delete security group + required: false + default: 'present' + aliases: [] + +extends_documentation_fragment: aws + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. 
+''' + +EXAMPLES = ''' +- name: example ec2 group + local_action: + module: ec2_group + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1a + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: all + # the containing group name may be specified here + group_name: example + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + tags: + - key: environment + value: production +''' + +import sys   # used in the ImportError fallback below +import time  # used in main() to wait for a newly created group + +try: + import boto.ec2 +except ImportError: + print("failed=True msg='boto required for this module'") + sys.exit(1) + + +def addRulesToLookup(rules, prefix, rules_dict): + # index rules by a composite "prefix-proto-from-to-group-cidr" key so that + # requested rules can be matched against existing ones + for rule in rules: + for grant in rule.grants: + rules_dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port, + grant.group_id, grant.cidr_ip)] = rule + + +def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): + """ + Returns tuple of (group_id, ip) after validating rule params. + + ec2: EC2 connection, used if a referenced group must be created. + rule: Dict describing a rule. + name: Name of the security group being managed. + groups: Dict of all available security groups. + vpc_id: ID of the VPC the managed group belongs to. + + AWS accepts an ip range or a security group as target of a rule. This + function validates the rule specification and returns either a non-None + group_id or a non-None ip range. + """ + + group_id = None + group_name = None + ip = None + target_group_created = False + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_id OR cidr_ip, not both") + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_name OR cidr_ip, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg="Specify group_id OR group_name, not both") + elif 'group_id' in rule: + group_id = rule['group_id'] + elif 'group_name' in rule: + group_name = rule['group_name'] + if group_name in groups: + group_id = groups[group_name].id + elif group_name == name: + group_id = group.id + groups[group_id] = group + groups[group_name] = group + else: + if not rule.get('group_desc', '').strip(): + module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule)) + if not module.check_mode: + auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id) + group_id = auto_group.id + groups[group_id] = auto_group + groups[group_name] = auto_group + target_group_created = True + elif 'cidr_ip' in rule: + ip = rule['cidr_ip'] + + return group_id, ip, target_group_created + +## can be removed if https://github.com/ansible/ansible/pull/9113 is merged upstream +def is_taggable(object): + + from boto.ec2.ec2object import TaggedEC2Object + if not object or not issubclass(object.__class__, TaggedEC2Object): + return False + + return True + +def do_tags(module, object, tags): + """ + General function for adding tags to objects that are subclasses + of boto.ec2.ec2object.TaggedEC2Object. Currently updates + existing tags, as the API overwrites them, but does not remove + orphans.
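+ + For example (an illustrative input, not part of the original docstring): + tags passed as [{'key': 'environment', 'value': 'production'}] are + collapsed into the dict {'environment': 'production'} before being applied + with add_tags(). +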
+ :param module: + :param object: + :param tags: + """ + dry_run = True if module.check_mode else False + + if is_taggable(object): + + tag_dict = {} + + for tag in tags: + tag_dict[tag['key']] = tag['value'] + + object.add_tags(tag_dict, dry_run) + else: + module.fail_json(msg="Security group object is not a subclass of TaggedEC2Object") +## end can be removed + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + description=dict(required=True), + vpc_id=dict(), + rules=dict(), + rules_egress=dict(), + tags=dict(type='list', default=[]), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params['name'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = module.params['rules'] + rules_egress = module.params['rules_egress'] + tags = module.params['tags'] + state = module.params.get('state') + + changed = False + + ec2 = ec2_connect(module) + + # find the group if present + group = None + groups = {} + for curGroup in ec2.get_all_security_groups(): + groups[curGroup.id] = curGroup + groups[curGroup.name] = curGroup + + if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id): + group = curGroup + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + try: + group.delete() + except Exception as e: + module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e)) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group found + # check the group parameters are correct + group_in_use = False + rs = ec2.get_all_instances() + for r in rs: + for i in r.instances: + group_in_use |= reduce(lambda x, y: x | (y.name == 'public-ssh'), i.groups, False) + + if group.description != description: + if group_in_use: + module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.") + + # if the group doesn't exist, create it now + else: + # no match found, create it + if not module.check_mode: + group = ec2.create_security_group(name, description, vpc_id=vpc_id) + + # When a group is created, an egress_rule ALLOW ALL + # to 0.0.0.0/0 is added automatically but it's not + # reflected in the object returned by the AWS API + # call. We re-read the group for getting an updated object + # amazon sometimes takes a couple seconds to update the security group so wait till it exists + while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0: + time.sleep(0.1) + + group = ec2.get_all_security_groups(group_ids=(group.id,))[0] + changed = True + + # tag the security group, using the do_tags helper defined above + do_tags(module, group, tags) + else: + module.fail_json(msg="Unsupported state requested: %s" % state) + + # create a lookup for all existing rules on the group + if group: + + # Manage ingress rules + groupRules = {} + addRulesToLookup(group.rules, 'in', groupRules) + + # Now, go through all provided rules and ensure they are there.
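+ # Worked example of the lookup keys (illustrative only): an existing + # ingress rule "tcp 80-80 from 0.0.0.0/0" is indexed by addRulesToLookup + # as "in-tcp-80-80-None-0.0.0.0/0". A requested rule that produces the + # same key is kept (its key is removed from groupRules); a rule with no + # matching key is authorized as new; any keys left in groupRules + # afterwards are revoked as defunct.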
+ if rules: + for rule in rules: + group_id, ip, target_group_created = get_target_from_rule(module, rule, name, group, groups) + if target_group_created: + changed = True + + if rule['proto'] == 'all': + rule['proto'] = -1 + rule['from_port'] = None + rule['to_port'] = None + + # If rule already exists, don't later delete it + ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) + if ruleId in groupRules: + del groupRules[ruleId] + # Otherwise, add new rule + else: + grantGroup = None + if group_id: + grantGroup = groups[group_id] + + if not module.check_mode: + group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup) + changed = True + + # Finally, remove anything left in the groupRules -- these will be defunct rules + for rule in six.itervalues(groupRules): + for grant in rule.grants: + grantGroup = None + if grant.group_id: + grantGroup = groups[grant.group_id] + if not module.check_mode: + group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) + changed = True + + # Manage egress rules + groupRules = {} + addRulesToLookup(group.rules_egress, 'out', groupRules) + + # Now, go through all provided rules and ensure they are there. + if rules_egress: + for rule in rules_egress: + group_id, ip, target_group_created = get_target_from_rule(module, rule, name, group, groups) + if target_group_created: + changed = True + + if rule['proto'] == 'all': + rule['proto'] = -1 + rule['from_port'] = None + rule['to_port'] = None + + # If rule already exists, don't later delete it + ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) + if ruleId in groupRules: + del groupRules[ruleId] + # Otherwise, add new rule + else: + grantGroup = None + if group_id: + grantGroup = groups[group_id].id + + if not module.check_mode: + ec2.authorize_security_group_egress( + group_id=group.id, + ip_protocol=rule['proto'], + from_port=rule['from_port'], + to_port=rule['to_port'], + src_group_id=grantGroup, + cidr_ip=ip) + changed = True + elif vpc_id and not module.check_mode: + # when using a vpc, but no egress rules are specified, + # we add in a default allow all out rule, which was the + # default behavior before egress rules were added + default_egress_rule = 'out--1-None-None-None-0.0.0.0/0' + if default_egress_rule not in groupRules: + ec2.authorize_security_group_egress( + group_id=group.id, + ip_protocol=-1, + from_port=None, + to_port=None, + src_group_id=None, + cidr_ip='0.0.0.0/0' + ) + changed = True + else: + # make sure the default egress rule is not removed + del groupRules[default_egress_rule] + + # Finally, remove anything left in the groupRules -- these will be defunct rules + for rule in six.itervalues(groupRules): + for grant in rule.grants: + grantGroup = None + if grant.group_id: + grantGroup = groups[grant.group_id].id + if not module.check_mode: + ec2.revoke_security_group_egress( + group_id=group.id, + ip_protocol=rule.ip_protocol, + from_port=rule.from_port, + to_port=rule.to_port, + src_group_id=grantGroup, + cidr_ip=grant.cidr_ip) + changed = True + + if group: + module.exit_json(changed=changed, group_id=group.id) + else: + module.exit_json(changed=changed, group_id=None) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/playbooks/library/ec2_iam_role b/playbooks/library/ec2_iam_role new file mode 100644 index 00000000000..490e7e48300 --- /dev/null +++ 
b/playbooks/library/ec2_iam_role
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+from __future__ import print_function
+DOCUMENTATION = """
+---
+module: ec2_iam_role
+short_description: Create or delete IAM roles.
+description:
+  - Can create or delete AWS IAM roles, along with their instance profiles
+    and inline policies.
+version_added: "1.8"
+author: Edward Zarecor
+options:
+  state:
+    description:
+      - create or delete the role
+    required: true
+    choices: ['present', 'absent']
+  instance_profile_name:
+    description:
+      - Name of the instance profile that holds the role
+    required: true
+  role_name:
+    description:
+      - Name of the role
+    required: true
+  policies:
+    description:
+      - List of inline policies to attach to the role; each entry is a dict
+        with 'name' and 'document' keys
+    required: false
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+- ec2_iam_role:
+    state: present
+    instance_profile_name: my-app-profile
+    role_name: my-app-role
+    policies:
+      - name: my-app-policy
+        document: "{{ lookup('file', 'my-app-policy.json') }}"
+'''
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+import sys
+try:
+    import boto
+except ImportError:
+    print("failed=True msg='boto required for this module'")
+    sys.exit(1)
+
+def present(connection, module):
+
+    profile_name = module.params.get('instance_profile_name')
+    role_name = module.params.get('role_name')
+    policies = module.params.get('policies')
+
+    fetched_profile = None
+    fetched_role = None
+
+    profile_arn = None
+    role_arn = None
+
+    try:
+        fetched_profile = connection.get_instance_profile(profile_name)
+    except boto.exception.BotoServerError as bse:
+        # profile does not exist yet; it is created below
+        pass
+
+    if not fetched_profile:
+        instance_profile = connection.create_instance_profile(profile_name)
+        profile_arn = instance_profile.arn
+    else:
+        profile_arn = fetched_profile.arn
+
+    try:
+        fetched_role = connection.get_role(role_name)
+    except boto.exception.BotoServerError as bse:
+        # role does not exist yet; it is created below
+        pass
+
+    if not fetched_role:
+        role = connection.create_role(role_name)
+        role_arn = role.arn
+    else:
+        role_arn = fetched_role.arn
+
+    if not fetched_profile and not fetched_role:
+        connection.add_role_to_instance_profile(profile_name, role_name)
+
+    for policy in policies:
+
+        fetched_policy = None
+
+        try:
+            fetched_policy = connection.get_role_policy(role_name, policy['name'])
+        except boto.exception.BotoServerError as bse:
+            # policy does not exist yet; it is created below
+            pass
+
+        if not fetched_policy:
+            connection.put_role_policy(role_name, policy['name'], policy['document'])
+        else:
+            # TODO: idempotent?
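+            # A sketch of how this branch could be made idempotent (assuming
+            # boto2's get_role_policy response, which nests a URL-quoted JSON
+            # document): decode fetched_policy's policy_document, compare it
+            # to json.loads(policy['document']), and skip the put when the
+            # two match. Until then the document is unconditionally re-put: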
+ connection.put_role_policy(role_name, policy['name'], policy['document']) + + + module.exit_json(changed=True, + instance_profile_arn=profile_arn, + role_arn=role_arn) + + +def absent(connection, module): + + profile_name = module.params.get('instance_profile_name') + role_name = module.params.get('role_name') + policies = module.params.get('policies') + + for policy in policies: + try: + connection.delete_role_policy(role_name,policy['name']) + except boto.exception.BotoServerError as bse: + # TODO: parse code to verify that this is not found case + pass + + connection.remove_role_from_instance_profile(profile_name,role_name) + connection.delete_role(role_name) + connection.delete_instance_profile(profile_name) + + module.exit_json(changed=True) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + instance_profile_name=dict(required=True, type='str'), + role_name=dict(required=True, type='str'), + policies=dict(type='list') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + profile = module.params.get('profile') + + try: + connection = boto.connect_iam(profile_name=profile) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg = str(e)) + + state = module.params.get('state') + + if state == 'present': + present(connection, module) + elif state == 'absent': + absent(connection, module) + +main() diff --git a/playbooks/library/ec2_local b/playbooks/library/ec2_local deleted file mode 100644 index 3d6da7c9998..00000000000 --- a/playbooks/library/ec2_local +++ /dev/null @@ -1,671 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2 -short_description: create or terminate an instance in ec2, return instanceid -description: - - Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5 -version_added: "0.9" -options: - key_name: - description: - - key pair to use on the instance - required: true - default: null - aliases: ['keypair'] - id: - description: - - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). 
- required: false - default: null - aliases: [] - group: - description: - - security group (or list of groups) to use with the instance - required: false - default: null - aliases: [ 'groups' ] - group_id: - version_added: "1.1" - description: - - security group id (or list of ids) to use with the instance - required: false - default: null - aliases: [] - region: - version_added: "1.2" - description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] - zone: - version_added: "1.2" - description: - - AWS availability zone in which to launch the instance - required: false - default: null - aliases: [ 'aws_zone', 'ec2_zone' ] - instance_type: - description: - - instance type to use for the instance - required: true - default: null - aliases: [] - image: - description: - - I(emi) (or I(ami)) to use for the instance - required: true - default: null - aliases: [] - kernel: - description: - - kernel I(eki) to use for the instance - required: false - default: null - aliases: [] - ramdisk: - description: - - ramdisk I(eri) to use for the instance - required: false - default: null - aliases: [] - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - count: - description: - - number of instances to launch - required: False - default: 1 - aliases: [] - monitoring: - version_added: "1.1" - description: - - enable detailed monitoring (CloudWatch) for instance - required: false - default: null - aliases: [] - user_data: - version_added: "0.9" - description: - - opaque blob of data which is made available to the ec2 instance - required: false - default: null - aliases: [] - instance_tags: - version_added: "1.0" - description: - - a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}' - required: false - default: null - aliases: [] - placement_group: - version_added: "1.3" - description: - - placement group for the instance when using EC2 Clustered Compute - required: false - default: null - aliases: [] - vpc_subnet_id: - version_added: "1.1" - description: - - the subnet ID in which to launch the instance (VPC) - required: false - default: null - aliases: [] - private_ip: - version_added: "1.2" - description: - - the private ip address to assign the instance (from the vpc subnet) - required: false - defualt: null - aliases: [] - instance_profile_name: - version_added: "1.3" - description: - - Name of the IAM instance profile to use. 
Boto library must be 2.5.0+ - required: false - default: null - aliases: [] - instance_ids: - version_added: "1.3" - description: - - list of instance ids, currently only used when state='absent' - required: false - default: null - aliases: [] - state: - version_added: "1.3" - description: - - create or terminate instances - required: false - default: 'present' - aliases: [] - root_ebs_size: - version_added: "1.5" - desription: - - size of the root volume in gigabytes - required: false - default: null - aliases: [] - -requirements: [ "boto" ] -author: Seth Vidal, Tim Gerla, Lester Wade -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic provisioning example -- local_action: - module: ec2 - keypair: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - count: 3 - -# Advanced example with tagging and CloudWatch -- local_action: - module: ec2 - keypair: mykey - group: databases - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - instance_tags: '{"db":"postgres"}' - monitoring=yes - -# Multiple groups example -local_action: - module: ec2 - keypair: mykey - group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - instance_tags: '{"db":"postgres"}' - monitoring=yes - -# VPC example -- local_action: - module: ec2 - keypair: mykey - group_id: sg-1dc53f72 - instance_type: m1.small - image: ami-6e649707 - wait: yes - vpc_subnet_id: subnet-29e63245 - - -# Launch instances, runs some tasks -# and then terminate them - - -- name: Create a sandbox instance - hosts: localhost - gather_facts: False - vars: - keypair: my_keypair - instance_type: m1.small - security_group: my_securitygroup - image: my_ami_id - region: us-east-1 - tasks: - - name: Launch instance - local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }} - register: ec2 - - name: Add new instance to host group - local_action: add_host hostname={{ item.public_ip }} groupname=launched - with_items: ec2.instances - - name: Wait for SSH to come up - local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started - with_items: ec2.instances - -- name: Configure instance(s) - hosts: launched - sudo: True - gather_facts: True - roles: - - my_awesome_role - - my_awesome_test - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: ec2 - state: 'absent' - instance_ids: {{ec2.instance_ids}} - -''' - -import sys -import time - -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - -try: - import boto.ec2 - from boto.exception import EC2ResponseError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def get_instance_info(inst): - """ - Retrieves instance information from an instance - ID and returns it as a dictionary - """ - instance_info = {'id': inst.id, - 'ami_launch_index': inst.ami_launch_index, - 'private_ip': inst.private_ip_address, - 'private_dns_name': inst.private_dns_name, - 'public_ip': inst.ip_address, - 'dns_name': inst.dns_name, - 'public_dns_name': 
inst.public_dns_name, - 'state_code': inst.state_code, - 'architecture': inst.architecture, - 'image_id': inst.image_id, - 'key_name': inst.key_name, - 'placement': inst.placement, - 'kernel': inst.kernel, - 'ramdisk': inst.ramdisk, - 'launch_time': inst.launch_time, - 'instance_type': inst.instance_type, - 'root_device_type': inst.root_device_type, - 'root_device_name': inst.root_device_name, - 'state': inst.state, - 'hypervisor': inst.hypervisor} - try: - instance_info['virtualization_type'] = getattr(inst,'virtualization_type') - except AttributeError: - instance_info['virtualization_type'] = None - - return instance_info - -def boto_supports_profile_name_arg(ec2): - """ - Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0 - - ec2: authenticated ec2 connection object - - Returns: - True if Boto library accept instance_profile_name argument, else false - """ - run_instances_method = getattr(ec2, 'run_instances') - return 'instance_profile_name' in run_instances_method.func_code.co_varnames - - -def create_instances(module, ec2): - """ - Creates new instances - - module : AnsibleModule object - ec2: authenticated ec2 connection object - - Returns: - A list of dictionaries with instance information - about the instances that were launched - """ - - key_name = module.params.get('key_name') - id = module.params.get('id') - group_name = module.params.get('group') - group_id = module.params.get('group_id') - zone = module.params.get('zone') - instance_type = module.params.get('instance_type') - image = module.params.get('image') - count = module.params.get('count') - monitoring = module.params.get('monitoring') - kernel = module.params.get('kernel') - ramdisk = module.params.get('ramdisk') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - placement_group = module.params.get('placement_group') - user_data = module.params.get('user_data') - instance_tags = module.params.get('instance_tags') - vpc_subnet_id = module.params.get('vpc_subnet_id') - private_ip = module.params.get('private_ip') - instance_profile_name = module.params.get('instance_profile_name') - root_ebs_size = module.params.get('root_ebs_size') - - if root_ebs_size: - dev_sda1 = boto.ec2.blockdevicemapping.EBSBlockDeviceType() - dev_sda1.size = root_ebs_size - bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping() - bdm['/dev/sda1'] = dev_sda1 - else: - bdm = None - - - # group_id and group_name are exclusive of each other - if group_id and group_name: - module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) - sys.exit(1) - - try: - # Here we try to lookup the group id from the security group name - if group is set. - if group_name: - grp_details = ec2.get_all_security_groups() - if type(group_name) == list: - group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] - elif type(group_name) == str: - for grp in grp_details: - if str(group_name) in str(grp): - group_id = [str(grp.id)] - group_name = [group_name] - # Now we try to lookup the group id testing if group exists. - elif group_id: - #wrap the group_id in a list if it's not one already - if type(group_id) == str: - group_id = [group_id] - grp_details = ec2.get_all_security_groups(group_ids=group_id) - grp_item = grp_details[0] - group_name = [grp_item.name] - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - # Lookup any instances that much our run id. 
- - running_instances = [] - count_remaining = int(count) - - if id != None: - filter_dict = {'client-token':id, 'instance-state-name' : 'running'} - previous_reservations = ec2.get_all_instances(None, filter_dict) - for res in previous_reservations: - for prev_instance in res.instances: - running_instances.append(prev_instance) - count_remaining = count_remaining - len(running_instances) - - # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want. - - if count_remaining == 0: - changed = False - else: - changed = True - try: - params = {'image_id': image, - 'key_name': key_name, - 'client_token': id, - 'min_count': count_remaining, - 'max_count': count_remaining, - 'monitoring_enabled': monitoring, - 'placement': zone, - 'placement_group': placement_group, - 'instance_type': instance_type, - 'kernel_id': kernel, - 'ramdisk_id': ramdisk, - 'subnet_id': vpc_subnet_id, - 'private_ip_address': private_ip, - 'user_data': user_data, - 'block_device_map': bdm} - - if boto_supports_profile_name_arg(ec2): - params['instance_profile_name'] = instance_profile_name - else: - if instance_profile_name is not None: - module.fail_json( - msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") - - if vpc_subnet_id: - params['security_group_ids'] = group_id - else: - params['security_groups'] = group_name - - res = ec2.run_instances(**params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - instids = [ i.id for i in res.instances ] - while True: - try: - res.connection.get_all_instances(instids) - break - except boto.exception.EC2ResponseError as e: - if "InvalidInstanceID.NotFound" in str(e): - # there's a race between start and get an instance - continue - else: - module.fail_json(msg = str(e)) - - if instance_tags: - try: - ec2.create_tags(instids, instance_tags) - except boto.exception.EC2ResponseError as e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # wait here until the instances are up - this_res = [] - num_running = 0 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and num_running < len(instids): - res_list = res.connection.get_all_instances(instids) - if len(res_list) > 0: - this_res = res_list[0] - num_running = len([ i for i in this_res.instances if i.state=='running' ]) - else: - # got a bad response of some sort, possibly due to - # stale/cached data. Wait a second and then try again - time.sleep(1) - continue - if wait and num_running < len(instids): - time.sleep(5) - else: - break - - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) - - for inst in this_res.instances: - running_instances.append(inst) - - instance_dict_array = [] - created_instance_ids = [] - for inst in running_instances: - d = get_instance_info(inst) - created_instance_ids.append(inst.id) - instance_dict_array.append(d) - - return (instance_dict_array, created_instance_ids, changed) - - -def terminate_instances(module, ec2, instance_ids): - """ - Terminates a list of instances - - module: Ansible module object - ec2: authenticated ec2 connection object - termination_list: a list of instances to terminate in the form of - [ {id: }, ..] - - Returns a dictionary of instance information - about the instances terminated. 
- - If the instance to be terminated is running - "changed" will be set to False. - - """ - - # Whether to wait for termination to complete before returning - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - changed = False - instance_dict_array = [] - - if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') - - terminated_instance_ids = [] - for res in ec2.get_all_instances(instance_ids): - for inst in res.instances: - if inst.state == 'running': - terminated_instance_ids.append(inst.id) - instance_dict_array.append(get_instance_info(inst)) - try: - ec2.terminate_instances([inst.id]) - except EC2ResponseError as e: - module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) - changed = True - - # wait here until the instances are 'terminated' - if wait: - num_terminated = 0 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids): - response = ec2.get_all_instances( \ - instance_ids=terminated_instance_ids, \ - filters={'instance-state-name':'terminated'}) - try: - num_terminated = len(response.pop().instances) - except Exception, e: - # got a bad response of some sort, possibly due to - # stale/cached data. Wait a second and then try again - time.sleep(1) - continue - - if num_terminated < len(terminated_instance_ids): - time.sleep(5) - - # waiting took too long - if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids): - module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime()) - - return (changed, instance_dict_array, terminated_instance_ids) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - key_name = dict(aliases = ['keypair']), - id = dict(), - group = dict(type='list'), - group_id = dict(type='list'), - region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - zone = dict(aliases=['aws_zone', 'ec2_zone']), - instance_type = dict(aliases=['type']), - image = dict(), - kernel = dict(), - count = dict(default='1'), - monitoring = dict(type='bool', default=False), - ramdisk = dict(), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - ec2_url = dict(), - ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), - ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), - placement_group = dict(), - user_data = dict(), - instance_tags = dict(type='dict'), - vpc_subnet_id = dict(), - private_ip = dict(), - instance_profile_name = dict(), - instance_ids = dict(type='list'), - state = dict(default='present'), - root_ebs_size = dict(default=None), - ) - ) - - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - # If we have a region specified, connect to its endpoint. 
- if region: - try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - # If we specified an ec2_url then try connecting to it - elif ec2_url: - try: - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="Either region or ec2_url must be specified") - - if module.params.get('state') == 'absent': - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - module.fail_json(msg='termination_list needs to be a list of instances to terminate') - - (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) - - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning new instances - if not module.params.get('key_name'): - module.fail_json(msg='key_name parameter is required for new instance') - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2) - - module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/playbooks/library/ec2_lookup b/playbooks/library/ec2_lookup index e1ab8bc1e49..92c3161d351 100644 --- a/playbooks/library/ec2_lookup +++ b/playbooks/library/ec2_lookup @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -14,6 +14,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import absolute_import +from __future__ import print_function +import six DOCUMENTATION = ''' --- module: ec2_lookup @@ -82,7 +85,7 @@ try: import boto.ec2 from boto.ec2 import connect_to_region except ImportError: - print "failed=True msg='boto required for this module'" + print("failed=True msg='boto required for this module'") sys.exit(1) @@ -100,7 +103,6 @@ def main(): ) ) - tags = module.params.get('tags') aws_secret_key = module.params.get('aws_secret_key') aws_access_key = module.params.get('aws_access_key') region = module.params.get('region') @@ -111,14 +113,14 @@ def main(): try: ec2 = connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) # If we specified an ec2_url then try connecting to it elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") @@ -126,11 +128,11 @@ def main(): instances = [] instance_ids = [] for res in ec2.get_all_instances(filters={'tag:' + tag: value - for tag, value in tags.iteritems()}): + for tag, value in six.iteritems(module.params.get('tags'))}): for inst in res.instances: if inst.state == "running": - instances.append({k: v for k, v in inst.__dict__.iteritems() - if isinstance(v, (basestring))}) + instances.append({k: v for k, v in six.iteritems(inst.__dict__) + if isinstance(v, (six.string_types))}) instance_ids.append(inst.id) module.exit_json(changed=False, instances=instances, instance_ids=instance_ids) diff --git a/playbooks/library/ec2_rt b/playbooks/library/ec2_rt new file mode 100644 index 00000000000..138754d19be --- /dev/null +++ b/playbooks/library/ec2_rt @@ -0,0 +1,233 @@ +#!/usr/bin/env python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
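+# Routes are passed to this module as a list of dicts, each with a 'cidr'
+# destination and either a 'gateway' or an 'instance' target; see
+# RTManager.update_routes below. A minimal sketch of the expected shape:
+#
+#   routes:
+#     - cidr: 0.0.0.0/0
+#       gateway: igw-12345678   # placeholder id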
+
+from __future__ import absolute_import
+from __future__ import print_function
+DOCUMENTATION = """
+---
+module: ec2_rt
+short_description: Create or delete AWS route tables
+description:
+  - Can create or delete AWS route tables in a VPC
+version_added: "1.8"
+author: Edward Zarecor
+options:
+  state:
+    description:
+      - create, update or delete the route table
+    required: true
+    choices: ['present', 'absent']
+  name:
+    description:
+      - Unique name for the route table within the VPC
+    required: true
+  routes:
+    description:
+      - List of routes; each entry is a dict with a 'cidr' destination and
+        either a 'gateway' or an 'instance' target
+    required: true
+    aliases: ['dest_routes']
+  vpc_id:
+    description:
+      - The VPC that this route table belongs to
+    required: true
+    default: null
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+'''
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+import sys
+try:
+    import boto.vpc
+except ImportError:
+    print("failed=True msg='boto required for this module'")
+    sys.exit(1)
+
+
+class DuplicateRouteTableError(Exception):
+    pass
+
+class InconsistentRouteError(Exception):
+    pass
+
+class RTManager():
+
+    def __init__(self, connection, vpc_id, route_name, routes, tags):
+        self.connection = connection
+        self.vpc_id = vpc_id
+        self.name = route_name
+        self.routes = routes
+        self.tags = tags
+        self.rt = None
+
+    def get_rt(self):
+        rt_filter = {"vpc_id": self.vpc_id,
+                     "tag:Name": self.name,
+                     }
+        results = self.connection.get_all_route_tables(filters=rt_filter)
+
+        if len(results) == 1:
+            self.rt = results[0]
+        elif len(results) > 1:
+            msg = "Found multiple route tables with name '{}' in vpc with id '{}'."
+            raise DuplicateRouteTableError(msg.format(self.name, self.vpc_id))
+        else:
+            # Doesn't exist yet
+            pass
+
+        return self.rt
+
+    def do_tags(self):
+        tags = {"Name": self.name}
+        if self.tags:
+            for tag in self.tags:
+                tags[tag['key']] = tag['value']
+        self.rt.add_tags(tags)
+
+    def create_rt(self):
+        self.rt = self.connection.create_route_table(self.vpc_id)
+        changed = True
+        self.do_tags()
+        return changed
+
+    def routes_match(self, new_route, existing_route):
+        # Not the same route
+        if new_route['cidr'] != existing_route.destination_cidr_block:
+            return False
+
+        # Use .get() so that a route specifying only one of
+        # instance/gateway does not raise a KeyError here.
+        instance_matches = existing_route.instance_id \
+            and existing_route.instance_id == new_route.get('instance')
+
+        gateway_matches = existing_route.gateway_id \
+            and existing_route.gateway_id == new_route.get('gateway')
+
+        return instance_matches or gateway_matches
+
+    def update_routes(self):
+        changed = False
+        existing_routes = {x.destination_cidr_block: x for x in self.rt.routes}
+
+        for route in self.routes:
+            # Build the args used to call the boto API
+            call_args = {
+                "route_table_id": self.rt.id,
+                "destination_cidr_block": route['cidr'],
+            }
+
+            if "gateway" in route and "instance" in route:
+                msg = "Both gateway and instance specified for route " + \
+                      "with CIDR {}"
+                raise InconsistentRouteError(msg.format(route['cidr']))
+            elif "gateway" in route:
+                call_args['gateway_id'] = route['gateway']
+            elif "instance" in route:
+                call_args['instance_id'] = route['instance']
+            else:
+                msg = "No gateway or instance provided for route with " + \
+                      "CIDR {}"
+                raise InconsistentRouteError(msg.format(route['cidr']))
+
+            if route['cidr'] in existing_routes:
+                # Update the route
+                existing_route = existing_routes[route['cidr']]
+
+                if self.routes_match(route, existing_route):
+                    continue
+
+                self.connection.replace_route(**call_args)
+                changed = True
+            else:
+                # Create a new route
+                self.connection.create_route(**call_args)
+                changed = True
+
+        return changed
+
+    def present(self):
+        changed = False
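+        # Reconcile against AWS: reuse a table found by its Name tag and
+        # update its routes, or create the table and then add the routes.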
+        existing = self.get_rt()
+
+        if existing:
+            changed = self.update_routes()
+        else:
+            changed = self.create_rt()
+            self.update_routes()
+
+        results = dict(changed=changed,
+                       id=self.rt.id,
+                       name=self.name,
+                       routes=self.routes,
+                       )
+
+        return results
+
+    def absent(self):
+        rt = self.get_rt()
+        changed = False
+
+        if rt:
+            changed = self.connection.delete_route_table(rt.id)
+
+        results = dict(changed=changed,
+                       id=rt.id if rt else None,
+                       name=self.name,
+                       )
+
+        return results
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True, type='str'),
+            state=dict(default='present', choices=['present', 'absent']),
+            vpc_id=dict(required=True, type='str'),
+            routes=dict(required=True, type='list', aliases=['dest_routes']),
+            tags=dict(type='list'),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
+    profile = module.params.get('profile')
+    vpc_id = module.params.get('vpc_id')
+    route_name = module.params.get('name')
+    routes = module.params.get('routes')
+    tags = module.params.get('tags')
+
+    if region:
+        try:
+            connection = boto.vpc.connect_to_region(region, profile_name=profile)
+        except boto.exception.NoAuthHandlerFound as e:
+            module.fail_json(msg=str(e))
+    else:
+        module.fail_json(msg="region must be specified")
+
+    manager = RTManager(connection, vpc_id, route_name, routes, tags)
+
+    state = module.params.get('state')
+
+    results = dict()
+    if state == 'present':
+        results = manager.present()
+    elif state == 'absent':
+        results = manager.absent()
+
+    module.exit_json(**results)
+
+main()
diff --git a/playbooks/library/ec2_subnet b/playbooks/library/ec2_subnet
new file mode 100644
index 00000000000..254c91cfb87
--- /dev/null
+++ b/playbooks/library/ec2_subnet
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
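+
+# A subnet is identified below by the (vpc_id, cidr_block, az) triple; an
+# existing match is updated in place (tags, route table association),
+# otherwise the subnet is created and associated with the given route table.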
+
+from __future__ import absolute_import
+from __future__ import print_function
+DOCUMENTATION = """
+---
+module: ec2_subnet
+short_description: Create or delete AWS subnets
+description:
+  - Can create or delete AWS subnets
+version_added: "1.8"
+author: Edward Zarecor
+options:
+  state:
+    description:
+      - create, update or delete the subnet
+    required: true
+    choices: ['present', 'absent']
+  name:
+    description:
+      - Unique name for subnet
+    required: true
+  cidr_block:
+    description:
+      - The cidr block of the subnet
+    aliases: ['cidr']
+  az:
+    description:
+      - The availability zone of the subnet
+  vpc_id:
+    description:
+      - The VPC that this subnet belongs to
+    required: true
+    default: null
+  route_table_id:
+    description:
+      - The route table to associate with the subnet
+    required: true
+  network_acl_id:
+    description:
+      - Optional network ACL to associate with the subnet
+    required: false
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+'''
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+import sys
+try:
+    import boto.vpc
+except ImportError:
+    print("failed=True msg='boto required for this module'")
+    sys.exit(1)
+
+from boto.exception import NoAuthHandlerFound
+
+
+class NonUniqueSubnetSpecification(Exception):
+    pass
+
+
+class SubnetManager:
+
+    def __init__(self, connection, vpc_id, cidr_block, az, name, route_table_id, network_acl_id, tags=None):
+
+        self.connection = connection
+        self.vpc_id = vpc_id
+        self.cidr_block = cidr_block
+        self.az = az
+        self.name = name
+        self.route_table_id = route_table_id
+        self.network_acl_id = network_acl_id
+        # avoid a mutable default argument; fall back to an empty tag list
+        self.tags = tags if tags is not None else []
+        self.subnet = None
+
+    def get_subnet(self):
+
+        if not self.subnet:
+
+            subnets = self.connection. \
+                get_all_subnets(filters={"vpc_id": self.vpc_id,
+                                         "cidr_block": self.cidr_block,
+                                         "availability_zone": self.az})
+            if len(subnets) > 1:
+                message = "Subnet specifier of vpc_id {vpc_id}, cidr_block {cidr_block} " \
+                          "and az {az} returned more than one result".format(
+                              vpc_id=self.vpc_id, cidr_block=self.cidr_block, az=self.az)
+                raise NonUniqueSubnetSpecification(message)
+            elif len(subnets) == 1:
+                self.subnet = subnets[0]
+
+        return self.subnet
+
+    def present(self):
+
+        if self.get_subnet():
+            changed = self.update_subnet()
+        else:
+            changed = self.create_subnet()
+
+        results = dict(changed=changed,
+                       subnet_id=self.subnet.id,
+                       subnet_name=self.name,
+                       vpc_id=self.vpc_id)
+
+        return results
+
+    def create_subnet(self):
+        changed = True
+        self.subnet = self.connection.create_subnet(self.vpc_id, self.cidr_block, availability_zone=self.az)
+        self.do_tags()
+        self.connection.associate_route_table(self.route_table_id, self.subnet.id)
+        if self.network_acl_id:
+            self.connection.associate_network_acl(self.network_acl_id, self.subnet.id)
+        return changed
+
+    def update_subnet(self):
+        changed = False
+        self.do_tags()
+
+        results = self.connection.get_all_route_tables(
+            filters={'association.subnet_id': self.subnet.id, 'vpc_id': self.vpc_id})
+
+        if len(results) == 1:
+            route_table = results[0]
+            assoc = self.get_association_from_route_table(route_table, self.subnet)
+            if assoc.route_table_id != self.route_table_id:
+                self.connection.replace_route_table_association_with_assoc(assoc.id, self.route_table_id)
+                changed = True
+        elif len(results) == 0:
+            # unlikely unless manual monkeying around
+            self.connection.associate_route_table(self.route_table_id, self.subnet.id)
+            changed = True
+
+        if self.network_acl_id:
+            self.connection.associate_network_acl(self.network_acl_id, self.subnet.id)
+
+        # acl_results = self.connection.get_all_network_acls(
+        #     filters={'association.subnet_id': self.subnet.id, 'vpc_id': self.vpc_id})
+        #
+        # if 
len(acl_results) == 1: + # acl = acl_results[0] + # + # if acl.id != self.network_acl_id: + # self.connection.disassociate_network_acl + + return changed + + def absent(self): + changed = self.connection.delete_subnet(self.subnet.id) + return dict(changed=changed) + + def get_association_from_route_table(self, route_table, subnet): + target = None + for assoc in route_table.associations: + if assoc.subnet_id == subnet.id: + target = assoc + break + + return target + + def do_tags(self): + """ + Utility that creates all tags including the Name tag which is treated + as a first class params as a convenience. Currently updates + existing tags, as the API overwrites them, but does not remove + orphans. + :return: None + """ + tags = {'Name': self.name} + if self.tags: + for tag in self.tags: + tags[tag['key']] = tag['value'] + + self.subnet.add_tags(tags) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent']), + vpc_id=dict(required=True, type='str'), + cidr_block=dict(required=True, type='str', aliases=['cidr']), + az=dict(required=True, type='str'), + route_table_id=dict(required=True, type='str'), + network_acl_id=dict(type='str'), + tags=dict(type='list'), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + profile = module.params.get('profile') + if region: + try: + connection = boto.vpc.connect_to_region(region, profile_name=profile) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + vpc_id = module.params.get('vpc_id') + cidr_block = module.params.get('cidr_block') + az = module.params.get('az') + name = module.params.get('name') + route_table_id = module.params.get('route_table_id') + network_acl_id = module.params.get('network_acl_id') + tags = module.params.get('tags') + + manager = SubnetManager(connection, vpc_id, cidr_block, az, name, route_table_id, network_acl_id, tags) + + state = module.params.get('state') + + if state == 'present': + results = manager.present() + elif state == 'absent': + results = manager.absent() + else: + raise Exception("Unexpected value for state {0}".format(state)) + + module.exit_json(**results) + +main() diff --git a/playbooks/library/ec2_tag_local b/playbooks/library/ec2_tag_local new file mode 100644 index 00000000000..f529a397f46 --- /dev/null +++ b/playbooks/library/ec2_tag_local @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# edX: Edited to allow for variable tag names + +from __future__ import absolute_import +from __future__ import print_function +DOCUMENTATION = ''' +--- +module: ec2_tag +short_description: create and remove tag(s) to ec2 resources. 
+description: + - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. +version_added: "1.3" +options: + resource: + description: + - The EC2 resource id. + required: true + default: null + aliases: [] + state: + description: + - Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance. + required: false + default: present + choices: ['present', 'absent', 'list'] + aliases: [] + region: + description: + - region in which the resource exists. + required: false + default: null + aliases: ['aws_region', 'ec2_region'] + +author: Lester Wade +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic example of adding tag(s) +tasks: +- name: tag a resource + local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present + args: + tags: + Name: ubervol + env: prod + +# Playbook example of adding tag(s) to spawned instances +tasks: +- name: launch some instances + local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 + register: ec2 + +- name: tag my launched instances + local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present + with_items: "{{ ec2.instances }}" + args: + tags: + Name: webserver + env: prod + +# Adding tags with dynamic names as well as dynamic values. +tasks: +- name: tag my instance + local_action: ec2_ntag resource={{ item.id }} region=us-east-1 state=present + with_items: "{{ ec2.instances }}" + args: + tags: + - Name: "{{ some_variable }}" + Value: "{{ some_other_variable }}" +''' + +# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. +# if state=present and it doesn't exist, create, tag and attach. +# Check for state by looking for volume attachment with tag (and against block device mapping?). +# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). + +import sys +import time + +try: + import boto.ec2 +except ImportError: + print("failed=True msg='boto required for this module'") + sys.exit(1) + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + resource = dict(required=True), + tags = dict(required=False, type='list'), + state = dict(default='present', choices=['present', 'absent', 'list']), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + resource = module.params.get('resource') + tags_param = module.params.get('tags') + state = module.params.get('state') + + ec2 = ec2_connect(module) + + # We need a comparison here so that we can accurately report back changed status. + # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate. + filters = {'resource-id' : resource} + gettags = ec2.get_all_tags(filters=filters) + + dictadd = {} + dictremove = {} + baddict = {} + tagdict = {} + tags = {} + for tag in gettags: + tagdict[tag.name] = tag.value + + if isinstance(tags_param, list): + for item in tags_param: + tags[item['Name']] = item['Value'] + else: + tags = tags_param + + if state == 'present': + if not tags: + module.fail_json(msg="tags argument is required when state is present") + if set(tags.items()).issubset(set(tagdict.items())): + module.exit_json(msg="Tags already exists in %s." 
%resource, changed=False) + else: + for (key, value) in set(tags.items()): + if (key, value) not in set(tagdict.items()): + dictadd[key] = value + tagger = ec2.create_tags(resource, dictadd) + gettags = ec2.get_all_tags(filters=filters) + module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True) + + if state == 'absent': + if not tags: + module.fail_json(msg="tags argument is required when state is absent") + for (key, value) in set(tags.items()): + if (key, value) not in set(tagdict.items()): + baddict[key] = value + if set(baddict) == set(tags): + module.exit_json(msg="Nothing to remove here. Move along.", changed=False) + for (key, value) in set(tags.items()): + if (key, value) in set(tagdict.items()): + dictremove[key] = value + tagger = ec2.delete_tags(resource, dictremove) + gettags = ec2.get_all_tags(filters=filters) + module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True) + + if state == 'list': + module.exit_json(changed=False, **tagdict) + sys.exit(0) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/playbooks/library/ec2_vpc_local b/playbooks/library/ec2_vpc_local new file mode 100644 index 00000000000..8f457fec4eb --- /dev/null +++ b/playbooks/library/ec2_vpc_local @@ -0,0 +1,741 @@ +#!/usr/bin/env python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Taken from version: 1.9 - PR to push up the changes here: +# https://github.com/ansible/ansible-modules-core/pull/1323 + + +from __future__ import absolute_import +DOCUMENTATION = ''' +--- +module: ec2_vpc +short_description: configure AWS virtual private clouds +description: + - Create or terminates AWS virtual private clouds. This module has a dependency on python-boto. +version_added: "1.4" +options: + cidr_block: + description: + - "The cidr block representing the VPC, e.g. 10.0.0.0/16" + required: false, unless state=present + instance_tenancy: + description: + - "The supported tenancy options for instances launched into the VPC." + required: false + default: "default" + choices: [ "default", "dedicated" ] + dns_support: + description: + - toggles the "Enable DNS resolution" flag + required: false + default: "yes" + choices: [ "yes", "no" ] + dns_hostnames: + description: + - toggles the "Enable DNS hostname support for instances" flag + required: false + default: "yes" + choices: [ "yes", "no" ] + subnets: + description: + - 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.' 
+ required: false + default: null + aliases: [] + vpc_id: + description: + - A VPC id to terminate when state=absent + required: false + default: null + aliases: [] + resource_tags: + description: + - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' + required: true + default: null + aliases: [] + version_added: "1.6" + internet_gateway: + description: + - Toggle whether there should be an Internet gateway attached to the VPC + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] + internet_gateway_tags: + description: + - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }.' + required: false + default: null + aliases: [] + route_tables: + description: + - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' + required: false + default: null + aliases: [] + wait: + description: + - wait for the VPC to be in state 'available' before returning + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 + aliases: [] + state: + description: + - Create or terminate the VPC + required: true + default: present + aliases: [] + region: + description: + - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. + required: true + default: null + aliases: ['aws_region', 'ec2_region'] +author: Carson Gee +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +# Basic creation example: + ec2_vpc: + state: present + cidr_block: 172.23.0.0/16 + resource_tags: { "Environment":"Development" } + region: us-west-2 +# Full creation example with subnets and optional availability zones. +# The absence or presence of subnets deletes or creates them respectively. 
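+# Note that subnets and route_tables are reconciled against the lists given
+# here (subnets absent from the list are removed, per the options above), so
+# each play should describe the complete desired set.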
+ ec2_vpc: + state: present + cidr_block: 172.22.0.0/16 + resource_tags: { "Environment":"Development" } + subnets: + - cidr: 172.22.1.0/24 + az: us-west-2c + resource_tags: { "Environment":"Dev", "Tier" : "Web" } + - cidr: 172.22.2.0/24 + az: us-west-2b + resource_tags: { "Environment":"Dev", "Tier" : "App" } + - cidr: 172.22.3.0/24 + az: us-west-2a + resource_tags: { "Environment":"Dev", "Tier" : "DB" } + internet_gateway: True + internet_gateway_tags: + Environment: Dev + route_tables: + - subnets: + - 172.22.2.0/24 + - 172.22.3.0/24 + routes: + - dest: 0.0.0.0/0 + gw: igw + - subnets: + - 172.22.1.0/24 + routes: + - dest: 0.0.0.0/0 + gw: igw + region: us-west-2 + register: vpc + +# Removal of a VPC by id + ec2_vpc: + state: absent + vpc_id: vpc-aaaaaaa + region: us-west-2 +If you have added elements not managed by this module, e.g. instances, NATs, etc then +the delete will fail until those dependencies are removed. +''' + + +import time + +try: + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError + + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def get_vpc_info(vpc): + """ + Retrieves vpc information from an instance + ID and returns it as a dictionary + """ + + return({ + 'id': vpc.id, + 'cidr_block': vpc.cidr_block, + 'dhcp_options_id': vpc.dhcp_options_id, + 'region': vpc.region.name, + 'state': vpc.state, + }) + +def get_igw_info(igw): + """ + Get info about the internet gateway. + """ + if igw is None: + return {} + + return ({ + 'id': igw.id, + }) + +def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): + """ + Finds a VPC that matches a specific id or cidr + tags + + module : AnsibleModule object + vpc_conn: authenticated VPCConnection connection object + + Returns: + A VPC object that matches either an ID or CIDR and one or more tag values + """ + + if vpc_id == None and cidr == None: + module.fail_json( + msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting' + ) + + found_vpcs = [] + + resource_tags = module.params.get('resource_tags') + + # Check for existing VPC by cidr_block or id + if vpc_id is not None: + found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',}) + + else: + previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'}) + + for vpc in previous_vpcs: + # Get all tags for each of the found VPCs + vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) + + # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC + if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())): + found_vpcs.append(vpc) + + found_vpc = None + + if len(found_vpcs) == 1: + found_vpc = found_vpcs[0] + + if len(found_vpcs) > 1: + module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting') + + return (found_vpc) + +def routes_match(rt_list=None, rt=None, igw=None): + + """ + Check if the route table has all routes as in given list + + rt_list : A list if routes provided in the module + rt : The Remote route table object + igw : The internet gateway object for this vpc + + Returns: + True when there provided routes and remote routes are the same. + False when provided routes and remote routes are diffrent. 
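+    Each local route is first normalized to a dict with gateway_id,
+    instance_id, state and destination_cidr_block keys, then compared
+    field-by-field against the remote routes on the table.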
+    """
+
+    local_routes = []
+    remote_routes = []
+    for route in rt_list:
+        route_kwargs = {}
+        if route['gw'] == 'igw':
+            route_kwargs['gateway_id'] = igw.id
+            route_kwargs['instance_id'] = None
+            route_kwargs['state'] = 'active'
+        elif route['gw'].startswith('i-'):
+            route_kwargs['instance_id'] = route['gw']
+            route_kwargs['gateway_id'] = None
+            route_kwargs['state'] = 'active'
+        else:
+            route_kwargs['gateway_id'] = route['gw']
+            route_kwargs['instance_id'] = None
+            route_kwargs['state'] = 'active'
+        route_kwargs['destination_cidr_block'] = route['dest']
+        local_routes.append(route_kwargs)
+    for j in rt.routes:
+        remote_routes.append(j.__dict__)
+    # A local route "matches" when its attributes are a subset of some remote route's attributes.
+    match = []
+    for i in local_routes:
+        found = False
+        for j in remote_routes:
+            if set(i.items()).issubset(set(j.items())):
+                found = True
+        match.append(found)
+    return all(match)
+
+def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
+    """
+    Checks if the remote routes match the local routes.
+
+    route_tables : The route_tables parameter in the module
+    vpc_conn     : The VPC connection object
+    module       : The module object
+    vpc          : The vpc object for this route table
+    igw          : The internet gateway object for this vpc
+
+    Returns:
+        True when there is a difference between the provided routes and the remote routes, or when the subnet associations differ.
+        False when both routes and subnet associations match.
+
+    """
+    # Add one for the main table
+    rtb_len = len(route_tables) + 1
+    remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
+    if remote_rtb_len != rtb_len:
+        return True
+    for rt in route_tables:
+        rt_id = None
+        for sn in rt['subnets']:
+            rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
+            if len(rsn) != 1:
+                module.fail_json(
+                    msg='The subnet {0} to associate with route_table {1} ' \
+                    'does not exist, aborting'.format(sn, rt)
+                )
+            nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
+            if not nrt:
+                return True
+            else:
+                nrt = nrt[0]
+                if not rt_id:
+                    rt_id = nrt.id
+                    if not routes_match(rt['routes'], nrt, igw):
+                        return True
+                    continue
+                else:
+                    if rt_id == nrt.id:
+                        continue
+                    else:
+                        return True
+    return False
+
+
+def create_vpc(module, vpc_conn):
+    """
+    Creates a new or modifies an existing VPC.
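+    The function is idempotent: an existing VPC matched by id, or by CIDR block plus
+    resource tags, is updated in place rather than recreated.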
+
+    module : AnsibleModule object
+    vpc_conn: authenticated VPCConnection connection object
+
+    Returns:
+        A dictionary with information
+        about the VPC and subnets that were launched
+    """
+
+    id = module.params.get('vpc_id')
+    cidr_block = module.params.get('cidr_block')
+    instance_tenancy = module.params.get('instance_tenancy')
+    dns_support = module.params.get('dns_support')
+    dns_hostnames = module.params.get('dns_hostnames')
+    subnets = module.params.get('subnets')
+    internet_gateway = module.params.get('internet_gateway')
+    internet_gateway_tags = module.params.get('internet_gateway_tags')
+    route_tables = module.params.get('route_tables')
+    vpc_spec_tags = module.params.get('resource_tags')
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+    changed = False
+
+    # Check for existing VPC by cidr_block + tags or id
+    previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
+
+    if previous_vpc is not None:
+        changed = False
+        vpc = previous_vpc
+    else:
+        changed = True
+        try:
+            vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
+            # wait here until the vpc is available
+            pending = True
+            wait_timeout = time.time() + wait_timeout
+            while wait and wait_timeout > time.time() and pending:
+                try:
+                    pvpc = vpc_conn.get_all_vpcs(vpc.id)
+                    if hasattr(pvpc, 'state'):
+                        if pvpc.state == "available":
+                            pending = False
+                    elif hasattr(pvpc[0], 'state'):
+                        if pvpc[0].state == "available":
+                            pending = False
+                # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
+                # when that happens, just wait a bit longer and try again
+                except boto.exception.BotoServerError as e:
+                    if e.error_code != 'InvalidVpcID.NotFound':
+                        raise
+                if pending:
+                    time.sleep(5)
+            if wait and wait_timeout <= time.time():
+                # waiting took too long
+                module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
+
+        except boto.exception.BotoServerError as e:
+            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+
+    # Done with the base VPC, now move on to attributes and features.
+
+    # Add resource tags
+    vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
+
+    if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
+        new_tags = {}
+
+        for (key, value) in set(vpc_spec_tags.items()):
+            if (key, value) not in set(vpc_tags.items()):
+                new_tags[key] = value
+
+        if new_tags:
+            vpc_conn.create_tags(vpc.id, new_tags)
+
+
+    # boto doesn't appear to have a way to determine the existing
+    # value of the dns attributes, so we just set them.
+    # It also must be done one at a time.
+    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
+    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
+
+
+    # Process all subnet properties
+    if subnets is not None:
+        if not isinstance(subnets, list):
+            module.fail_json(msg='subnets needs to be a list of cidr blocks')
+
+        current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+
+        # First add all new subnets
+        for subnet in subnets:
+            add_subnet = True
+            for csn in current_subnets:
+                if subnet['cidr'] == csn.cidr_block:
+                    add_subnet = False
+            if add_subnet:
+                try:
+                    new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
+                    new_subnet_tags = subnet.get('resource_tags', None)
+                    if new_subnet_tags:
+                        # Sometimes AWS takes its time to create a subnet and so using the new subnet's id
+                        # to create tags results in an exception.
+                        # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
+                        # so I resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
+                        while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
+                            time.sleep(0.1)
+
+                        vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
+
+                    changed = True
+                except EC2ResponseError as e:
+                    module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
+
+        # Now delete all absent subnets
+        for csubnet in current_subnets:
+            delete_subnet = True
+            for subnet in subnets:
+                if csubnet.cidr_block == subnet['cidr']:
+                    delete_subnet = False
+            if delete_subnet:
+                try:
+                    vpc_conn.delete_subnet(csubnet.id)
+                    changed = True
+                except EC2ResponseError as e:
+                    module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
+
+    # Handle Internet gateway (create/delete igw)
+    igw = None
+    igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
+    if len(igws) > 1:
+        module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
+    if internet_gateway:
+        if len(igws) != 1:
+            try:
+                igw = vpc_conn.create_internet_gateway()
+                if internet_gateway_tags:
+                    vpc_conn.create_tags(igw.id, internet_gateway_tags)
+                vpc_conn.attach_internet_gateway(igw.id, vpc.id)
+                changed = True
+            except EC2ResponseError as e:
+                module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
+        else:
+            # Set igw variable to the current igw instance for use in route tables.
+            igw = igws[0]
+    else:
+        if len(igws) > 0:
+            try:
+                vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
+                vpc_conn.delete_internet_gateway(igws[0].id)
+                changed = True
+            except EC2ResponseError as e:
+                module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
+
+    # Handle route tables - this may be worth splitting into a
+    # different module but should work fine here. The strategy to stay
+    # idempotent is to basically build all the route tables as
+    # defined, track the route table ids, and then run through the
+    # remote list of route tables and delete any that we didn't
+    # create. This shouldn't interrupt traffic in theory, but is the
+    # only way to really work with route tables over time that I can
+    # think of without using painful aws ids. Hopefully boto will add
+    # the replace-route-table API to make this smoother and
+    # allow control of the 'main' routing table.
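+    # Illustrative outline of the route table handling below:
+    #   1. create a fresh route table for each entry in route_tables
+    #   2. add each route, resolving the special 'igw' keyword to the attached gateway's id
+    #   3. re-point each listed subnet at its new table (disassociate, then associate)
+    #   4. delete any remaining non-main route table that this run did not create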
+ if route_tables is not None: + rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw) + if route_tables is not None and rtb_needs_change: + if not isinstance(route_tables, list): + module.fail_json(msg='route tables need to be a list of dictionaries') + + # Work through each route table and update/create to match dictionary array + all_route_tables = [] + for rt in route_tables: + try: + new_rt = vpc_conn.create_route_table(vpc.id) + for route in rt['routes']: + route_kwargs = {} + if route['gw'] == 'igw': + if not internet_gateway: + module.fail_json( + msg='You asked for an Internet Gateway ' \ + '(igw) route, but you have no Internet Gateway' + ) + route_kwargs['gateway_id'] = igw.id + elif route['gw'].startswith('i-'): + route_kwargs['instance_id'] = route['gw'] + else: + route_kwargs['gateway_id'] = route['gw'] + vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs) + + # Associate with subnets + for sn in rt['subnets']: + rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id }) + if len(rsn) != 1: + module.fail_json( + msg='The subnet {0} to associate with route_table {1} ' \ + 'does not exist, aborting'.format(sn, rt) + ) + rsn = rsn[0] + + # Disassociate then associate since we don't have replace + old_rt = vpc_conn.get_all_route_tables( + filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id} + ) + old_rt = [ x for x in old_rt if x.id != None ] + if len(old_rt) == 1: + old_rt = old_rt[0] + association_id = None + for a in old_rt.associations: + if a.subnet_id == rsn.id: + association_id = a.id + vpc_conn.disassociate_route_table(association_id) + + vpc_conn.associate_route_table(new_rt.id, rsn.id) + + all_route_tables.append(new_rt) + changed = True + except EC2ResponseError as e: + module.fail_json( + msg='Unable to create and associate route table {0}, error: ' \ + '{1}'.format(rt, e) + ) + + # Now that we are good to go on our new route tables, delete the + # old ones except the 'main' route table as boto can't set the main + # table yet. 
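+        # A route table is the VPC's "main" table when one of its associations
+        # has main set to True; those are skipped below rather than deleted.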
+ all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + for rt in all_rts: + if rt.id is None: + continue + delete_rt = True + for newrt in all_route_tables: + if newrt.id == rt.id: + delete_rt = False + break + if delete_rt: + rta = rt.associations + is_main = False + for a in rta: + if a.main: + is_main = True + break + try: + if not is_main: + vpc_conn.delete_route_table(rt.id) + changed = True + except EC2ResponseError as e: + module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e)) + + vpc_dict = get_vpc_info(vpc) + igw_dict = get_igw_info(igw) + created_vpc_id = vpc.id + returned_subnets = [] + current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) + + for sn in current_subnets: + returned_subnets.append({ + 'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), + 'cidr': sn.cidr_block, + 'az': sn.availability_zone, + 'id': sn.id, + }) + + if subnets is not None: + # Sort subnets by the order they were listed in the play + order = {} + for idx, val in enumerate(subnets): + order[val['cidr']] = idx + + # Number of subnets in the play + subnets_in_play = len(subnets) + returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play)) + + return (vpc_dict, created_vpc_id, returned_subnets, igw_dict, changed) + +def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): + """ + Terminates a VPC + + module: Ansible module object + vpc_conn: authenticated VPCConnection connection object + vpc_id: a vpc id to terminate + cidr: The cidr block of the VPC - can be used in lieu of an ID + + Returns a dictionary of VPC information + about the VPC terminated. + + If the VPC to be terminated is available + "changed" will be set to True. 
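+    Illustrative call: terminate_vpc(module, vpc_conn, vpc_id='vpc-aaaaaaa') returns a
+    (changed, vpc_dict, terminated_vpc_id) tuple such as (True, {...}, 'vpc-aaaaaaa')
+    when the VPC existed and was deleted.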
+ + """ + vpc_dict = {} + terminated_vpc_id = '' + changed = False + + vpc = find_vpc(module, vpc_conn, vpc_id, cidr) + + if vpc is not None: + if vpc.state == 'available': + terminated_vpc_id=vpc.id + vpc_dict=get_vpc_info(vpc) + try: + subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id}) + for sn in subnets: + vpc_conn.delete_subnet(sn.id) + + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc.id} + ) + for igw in igws: + vpc_conn.detach_internet_gateway(igw.id, vpc.id) + vpc_conn.delete_internet_gateway(igw.id) + + rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}) + for rt in rts: + rta = rt.associations + is_main = False + for a in rta: + if a.main: + is_main = True + if not is_main: + vpc_conn.delete_route_table(rt.id) + + vpc_conn.delete_vpc(vpc.id) + except EC2ResponseError as e: + module.fail_json( + msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e) + ) + changed = True + + return (changed, vpc_dict, terminated_vpc_id) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + cidr_block = dict(), + instance_tenancy = dict(choices=['default', 'dedicated'], default='default'), + wait = dict(type='bool', default=False), + wait_timeout = dict(default=300), + dns_support = dict(type='bool', default=True), + dns_hostnames = dict(type='bool', default=True), + subnets = dict(type='list'), + vpc_id = dict(), + internet_gateway = dict(type='bool', default=False), + internet_gateway_tags = dict(type='dict'), + resource_tags = dict(type='dict', required=True), + route_tables = dict(type='list'), + state = dict(choices=['present', 'absent'], default='present'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + state = module.params.get('state') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + # If we have a region specified, connect to its endpoint. + if region: + try: + vpc_conn = boto.vpc.connect_to_region( + region, + **aws_connect_kwargs + ) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg = str(e)) + else: + module.fail_json(msg="region must be specified") + + igw_dict = {} + if module.params.get('state') == 'absent': + vpc_id = module.params.get('vpc_id') + cidr = module.params.get('cidr_block') + (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr) + subnets_changed = None + elif module.params.get('state') == 'present': + # Changed is always set to true when provisioning a new VPC + (vpc_dict, new_vpc_id, subnets_changed, igw_dict, changed) = create_vpc(module, vpc_conn) + + module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw=igw_dict, subnets=subnets_changed) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/playbooks/library/mongodb_replica_set b/playbooks/library/mongodb_replica_set new file mode 100644 index 00000000000..8026d24ddfe --- /dev/null +++ b/playbooks/library/mongodb_replica_set @@ -0,0 +1,385 @@ +#!/usr/bin/env python + +from __future__ import absolute_import +from six.moves import filter +from six.moves import map +from six.moves import range +from six.moves import zip +DOCUMENTATION = """ +--- +module: mongodb_replica_set +short_description: Modify replica set config. 
+description:
+  - Modify replica set config, including modifying/adding/removing members from a replica set,
+    changing replica set options, and initiating the replica set if necessary.
+    Uses replSetReconfig and replSetInitiate.
+version_added: "1.9"
+author:
+  - Max Rothman
+  - Feanil Patel
+options:
+  rs_host:
+    description:
+      - The hostname or ip of a server already in the mongo cluster.
+    required: false
+    default: 'localhost'
+  rs_port:
+    description:
+      - The port to connect to mongo on.
+    required: false
+    default: 27017
+  username:
+    description:
+      - The username of the mongo user to connect as.
+    required: false
+  password:
+    description:
+      - The password to use when authenticating.
+    required: false
+  auth_database:
+    description:
+      - The database to authenticate against.
+    required: false
+  force:
+    description: Whether to pass the "force" option to replSetReconfig.
+      For more details, see the MongoDB documentation for the replSetReconfig command.
+    required: false
+    default: false
+  rs_config:
+    description: A replica set configuration document, as described in the MongoDB
+      replica set configuration reference.
+      This structure can be a valid document, but this module can also manage some details for you:
+
+      - members can have separate ``host`` and ``port`` properties. ``port`` defaults to 27017.
+        To override this, provide a ``host`` like ``somehost:27017``.
+      - ``_id`` is automatically managed if not provided
+      - members' ``_id`` are automatically managed
+      - ``version`` is automatically incremented
+
+    required: true
+"""
+
+EXAMPLES = '''
+- name: Basic example
+  mongodb_replica_set:
+    username: root
+    password: password
+    rs_config:
+      members:
+        - host: some.host
+        - host: other.host
+          port: 27018
+          hidden: true
+
+- name: Fully specify a whole document
+  mongodb_replica_set:
+    username: admin
+    password: password
+    rs_config:
+      _id: myReplicaSetName
+      version: 5
+      members:
+        - _id: 1
+          host: some.host:27017
+        - _id: 2
+          host: other.host:27017
+          hidden: true
+'''
+# Magic import
+from ansible.module_utils.basic import *
+
+try:
+    from pymongo import MongoClient
+    from pymongo.errors import OperationFailure
+    from bson import json_util
+except ImportError:
+    pymongo_found = False
+else:
+    pymongo_found = True
+
+import json, copy
+from six.moves.urllib.parse import quote_plus
+from operator import itemgetter
+
+########### Mongo API calls ###########
+def get_replset():
+    # Not using `replSetGetConfig` because it's not supported in MongoDB 2.x.
+    try:
+        rs_config = client.local.system.replset.find_one()
+    except OperationFailure as e:
+        return None
+
+    return rs_config
+
+def initialize_replset(rs_config):
+    try:
+        client.admin.command("replSetInitiate", rs_config)
+    except OperationFailure as e:
+        module.fail_json(msg="Failed to initiate replSet: {}".format(e.message))
+
+def reconfig_replset(rs_config):
+    try:
+        client.admin.command("replSetReconfig", rs_config, force=module.params['force'])
+    except OperationFailure as e:
+        module.fail_json(msg="Failed to reconfigure replSet: {}".format(e.message))
+
+def get_rs_config_id():
+    try:
+        return client.admin.command('getCmdLineOpts')['parsed']['replication']['replSetName']
+    except (OperationFailure, KeyError) as e:
+        module.fail_json(msg=("Unable to get replSet name. "
+            "Was mongod started with --replSet, "
+            "or was replication.replSetName set in the config file? Error: ") + e.message)
+
+
+########### Helper functions ###########
+def set_member_ids(members, old_members=None):
+    '''
+    Set the _id property of members who don't already have one.
+    Prefer the _id of the "matching" member from `old_members`.
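+
+    Illustrative example: with members=[{'host': 'a'}, {'_id': 3, 'host': 'b'}] and
+    old_members=None, the member without an _id is assigned the lowest unused id, 0.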
+    '''
+    #Add a little padding to ensure we don't run out of IDs
+    available_ids = set(range(len(members)*2))
+    available_ids -= {m['_id'] for m in members if '_id' in m}
+    if old_members is not None:
+        available_ids -= {m['_id'] for m in old_members}
+    available_ids = list(sorted(available_ids, reverse=True))
+
+    for member in members:
+        if '_id' not in member:
+            if old_members is not None:
+                match = get_matching_member(member, old_members)
+                member['_id'] = match['_id'] if match is not None else available_ids.pop()
+            else:
+                member['_id'] = available_ids.pop()
+
+def get_matching_member(member, members):
+    '''Return the rs_member from `members` that "matches" `member` (currently matched on host)'''
+    match = [m for m in members if m['host'] == member['host']]
+    return match[0] if len(match) > 0 else None
+
+def members_match(new, old):
+    "Compare 2 lists of members, discounting their `_id`s and matching on hostname"
+    if len(new) != len(old):
+        return False
+    for old_member in old:
+        match = get_matching_member(old_member, new)
+        if match is None:
+            # No member in `new` has this host, so the lists differ.
+            return False
+        new_member = match.copy()
+        #Don't compare on _id
+        new_member.pop('_id', None)
+        old_member = old_member.copy()
+        old_member.pop('_id', None)
+        if old_member != new_member:
+            return False
+    return True
+
+def fix_host_port(rs_config):
+    '''Fix host, port to host:port'''
+    if 'members' in rs_config:
+        if not isinstance(rs_config['members'], list):
+            module.fail_json(msg='rs_config.members must be a list')
+
+        for member in rs_config['members']:
+            if ':' not in member['host']:
+                member['host'] = '{}:{}'.format(member['host'], member.get('port', 27017))
+                if 'port' in member:
+                    del member['port']
+
+def check_config_subset(old_config, new_config):
+    '''
+    Compares the old config (what we pass in to Mongo) to the new config (returned from Mongo)
+    It is assumed that old_config will be a subset of new_config because Mongo tracks many more
+    details about the replica set and the members in a replica set that we don't track in our
+    secure repo.
+    '''
+
+    for k in old_config:
+        if k == 'members':
+            matches = is_member_subset(old_config['members'],new_config['members'])
+            if not matches: return False
+        else:
+            if old_config[k] != new_config[k]: return False
+
+    return True
+
+
+def is_member_subset(old_members,new_members):
+    '''
+    Compares the member list of a replica set configuration as specified (old_members)
+    to what Mongo has returned (new_members). If it finds anything in old_members that
+    does not match new_members, it will return False. new_members is allowed to contain
+    extra information that is not reflected in old_members because we do not necessarily
+    track all of mongo's internal data in the config.
+    '''
+
+    # Mongo returns the member set in no particular order, and we were
+    # indexing into the list using _id before without sorting which led to failure.
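+    # Sorting both lists by _id lets the zip() below compare corresponding members pairwise.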
+    old_members, new_members = [sorted(k, key=itemgetter('_id'))
+                                for k in (old_members, new_members)]
+
+    for k1, k2 in zip(old_members, new_members):
+        for key, value in k1.items():
+            if value != k2[key]: return False
+
+    return True
+
+def update_replset(rs_config):
+    changed = False
+    old_rs_config = get_replset()
+    fix_host_port(rs_config) #fix host, port to host:port
+
+    #Decide whether we need to initialize
+    if old_rs_config is None:
+        changed = True
+        if '_id' not in rs_config:
+            rs_config['_id'] = get_rs_config_id() #Errors if no replSet specified to mongod
+        set_member_ids(rs_config['members']) #Noop if all _ids are set
+        #Don't set the version, it'll auto-set
+        initialize_replset(rs_config)
+
+    else:
+        old_rs_config_scalars = {k:v for k,v in old_rs_config.items() if not isinstance(v, (list, dict))}
+        rs_config_scalars = {k:v for k,v in rs_config.items() if not isinstance(v, (list, dict))}
+        if '_id' not in rs_config_scalars and '_id' in old_rs_config_scalars:
+            # _id is going to be managed, don't compare on it
+            del old_rs_config_scalars['_id']
+        if 'version' not in rs_config and 'version' in old_rs_config_scalars:
+            # version is going to be managed, don't compare on it
+            del old_rs_config_scalars['version']
+
+        # Special comparison to test whether 2 rs_configs are "equivalent"
+        # We can't simply use == because of special logic in `members_match()`
+        # 1. Compare the scalars (i.e. non-collections)
+        # 2. Compare the "settings" dict
+        # 3. Compare the members dicts using `members_match()`
+        # Since the only nested structures in the rs_config spec are "members" and "settings",
+        # if all of the above 3 match, the structures are equivalent.
+        if rs_config_scalars != old_rs_config_scalars \
+                or rs_config.get('settings') != old_rs_config.get('settings') \
+                or not members_match(rs_config['members'], old_rs_config['members']):
+
+            changed = True
+            if '_id' not in rs_config:
+                rs_config['_id'] = old_rs_config['_id']
+            if 'version' not in rs_config:
+                #Using manual increment to prevent race condition
+                rs_config['version'] = old_rs_config['version'] + 1
+
+            set_member_ids(rs_config['members'], old_rs_config['members']) #Noop if all _ids are set
+
+            reconfig_replset(rs_config)
+
+    #Validate it worked
+    if changed:
+        changed_rs_config = get_replset()
+        if not check_config_subset(rs_config, changed_rs_config):
+            module.fail_json(msg="Failed to validate that the replica set was changed", new_config=changed_rs_config, config=rs_config)
+
+        # Remove settings from changed_rs_config before exit to avoid
+        # a problem with exit_json() choking on the unserializable ObjectId:
+        # the config comes back from MongoDB with BSON types that have no
+        # JSON equivalent
+        if changed_rs_config.get('settings') is not None:
+            changed_rs_config['settings'] = None
+    else:
+        # Nothing changed, so the current config is also the "new" config.
+        changed_rs_config = old_rs_config
+
+    module.exit_json(changed=changed, config=rs_config, new_config=changed_rs_config)
+
+
+######### Client making stuff #########
+def get_mongo_uri(host, port, username, password, auth_database):
+    mongo_uri = 'mongodb://'
+    if username and password:
+        mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password])))
+
+    mongo_uri += "{}:{}".format(quote_plus(host), port)
+
+    if auth_database:
+        mongo_uri += "/{}".format(quote_plus(auth_database))
+
+    return mongo_uri
+
+def primary_client(some_host, some_port, username, password, auth_database):
+    '''
+    Given a member of a replica set, find out who the primary is
+    and provide a client that is connected to the primary for running
+    commands.
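+
+    Illustrative flow: connect to the seed host, run replSetGetStatus, find the member
+    whose stateStr is 'PRIMARY', and reconnect there if it differs from the seed host.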
+
+    Because this function attempts to find the primary of your replica set,
+    it can fail and throw PyMongo exceptions. You should handle these and
+    fall back to get_client.
+    '''
+    client = get_client(some_host, some_port, username, password, auth_database)
+    # This can fail (throws OperationFailure), in which case code will need to
+    # fall back to using get_client since there either is no primary, or we can't
+    # know it for some reason.
+    status = client.admin.command("replSetGetStatus")
+
+    # Find out who the primary is.
+    rs_primary = [member for member in status['members'] if member['stateStr']=='PRIMARY'][0]
+    primary_host, primary_port = rs_primary['name'].split(':')
+
+    # Connect to the primary if this is not the primary.
+    # primary_port was parsed out of a 'host:port' string, so cast it before comparing.
+    if primary_host != some_host or int(primary_port) != some_port:
+        client.close()
+        new_uri = get_mongo_uri(primary_host, primary_port, username, password, auth_database)
+        client = MongoClient(new_uri)
+
+    return client
+
+def get_client(some_host, some_port, username, password, auth_database):
+    '''
+    Connects to the given host. Does not have any of the logic of primary_client,
+    so is safer to use when handling an uninitialized replica set or some other
+    mongo instance that requires special logic.
+
+    This function connects to Mongo, and as such can throw any of the PyMongo
+    exceptions.
+    '''
+    mongo_uri = get_mongo_uri(some_host, some_port, username, password, auth_database)
+    client = MongoClient(mongo_uri)
+    return client
+
+################ Main ################
+def validate_args():
+    arg_spec = dict(
+        username = dict(required=False, type='str'),
+        password = dict(required=False, type='str'),
+        auth_database = dict(required=False, type='str'),
+        rs_host = dict(required=False, type='str', default="localhost"),
+        rs_port = dict(required=False, type='int', default=27017),
+        rs_config = dict(required=True, type='dict'),
+        force = dict(required=False, type='bool', default=False),
+    )
+
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
+
+    username = module.params.get('username')
+    password = module.params.get('password')
+    if (username and not password) or (password and not username):
+        module.fail_json(msg="Must provide both username and password or neither.")
+
+    # Check that if votes is 0, priority is also 0
+    for member in module.params.get('rs_config').get('members'):
+        if member.get('votes') == 0 and member.get('priority') != 0:
+            module.fail_json(msg="Non-voting member {} must have priority 0".
+                             format(member['host']))
+
+    return module
+
+
+if __name__ == '__main__':
+    module = validate_args()
+
+    if not pymongo_found:
+        module.fail_json(msg="The python pymongo module is not installed.")
+
+    username = module.params.get('username')
+    password = module.params.get('password')
+    auth_database = module.params.get('auth_database')
+    rs_host = module.params['rs_host']
+    rs_port = module.params['rs_port']
+
+    try:
+        client = primary_client(rs_host, rs_port, username, password, auth_database)
+    except OperationFailure:
+        client = get_client(rs_host, rs_port, username, password, auth_database)
+
+    update_replset(module.params['rs_config'])
diff --git a/playbooks/library/mongodb_rs_config b/playbooks/library/mongodb_rs_config
new file mode 100644
index 00000000000..02c9ed1fb22
--- /dev/null
+++ b/playbooks/library/mongodb_rs_config
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+from six.moves import map
+DOCUMENTATION = """
+---
+module: mongodb_rs_config
+short_description: Get the configuration of a replica set of a mongo cluster.
+description:
+  - Get the config of the replica set of a mongo cluster. Provides a filtered version of the info from rs.config().
+    Returns primary, secondary, and hidden keys; each contains a list of the members in that state. Lists
+    may be empty. Additionally returns the full config document in the config key. Keep in mind that hosts may be
+    duplicated between secondary and hidden, since hidden hosts are secondaries.
+version_added: "1.9"
+author:
+  - Feanil Patel
+  - Kevin Falcone
+options:
+  host:
+    description:
+      - The hostname or ip of a server in the mongo cluster.
+    required: false
+    default: 'localhost'
+  port:
+    description:
+      - The port to connect to mongo on.
+    required: false
+    default: 27017
+  username:
+    description:
+      - The username of the mongo user to connect as.
+    required: false
+  password:
+    description:
+      - The password to use when authenticating.
+    required: false
+  auth_database:
+    description:
+      - The database to authenticate against.
+ required: false +""" + +EXAMPLES = ''' +- name: Get status for the stage cluster + mongodb_rs_config: + host: localhost:27017 + username: root + password: password + register: rs_config +''' +# Magic import +from ansible.module_utils.basic import * + +try: + from pymongo import MongoClient + from pymongo.errors import OperationFailure + from bson import json_util +except ImportError: + pymongo_found = False +else: + pymongo_found = True + +import json +from six.moves.urllib.parse import quote_plus + +def main(): + + arg_spec = dict( + host=dict(required=False, type='str', default="localhost"), + port=dict(required=False, type='int', default=27017), + username=dict(required=False, type='str'), + password=dict(required=False, type='str'), + auth_database=dict(required=False, type='str') + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False) + + if not pymongo_found: + module.fail_json(msg="The python pymongo module is not installed.") + + mongo_uri = 'mongodb://' + host = module.params.get('host') + port = module.params.get('port') + username = module.params.get('username') + password = module.params.get('password') + auth_database = module.params.get('auth_database') + + if (username and not password) or (password and not username): + module.fail_json(msg="Must provide both username and password or neither.") + + if username: + mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password]))) + + mongo_uri += "{}:{}".format(quote_plus(host),port) + + if auth_database: + mongo_uri += '/{}'.format(quote_plus(auth_database)) + + client = MongoClient(mongo_uri) + + # This checks to see if you have a replSetName configured + # This generally means that /etc/mongod.conf has been changed + # from the default to use a replica set and mongo has been + # restarted to use it. + + try: + repl_set = client.admin.command('getCmdLineOpts')['parsed']['replication']['replSetName'] + except (OperationFailure, KeyError): + module.exit_json(changed=False) + + if repl_set: + status = client.admin.command("replSetGetStatus") + # Not using `replSetGetConfig` because it's not supported in MongoDB 2.x. + rs_config = client.local.system.replset.find_one() + else: + module.exit_json(changed=False) + + # This converts the bson into a python dictionary that ansible's standard + # jsonify function can process and output without throwing errors on bson + # types that don't exist in JSON + status = json.loads(json_util.dumps(status)) + rs_config = json.loads(json_util.dumps(rs_config)) + + # Status contains information about Primary/Secondary, so we iterate that list + # But we want to return config for that host, not status (since this is the config module), + # this is the inner loop of the comprehension, where we match on the hostname (could also + # match on _id). 
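+    # Expanded form of the first comprehension below (illustrative):
+    #   for m in status['members']:
+    #       if m['stateStr'] == 'PRIMARY':
+    #           for c in rs_config['members']:
+    #               if m['name'] == c['host']:
+    #                   primary.append(c)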
+    primary = [ c for m in status['members'] if m['stateStr'] == 'PRIMARY' for c in rs_config['members'] if m['name'] == c['host'] ]
+    secondary = [ c for m in status['members'] if m['stateStr'] == 'SECONDARY' for c in rs_config['members'] if m['name'] == c['host'] ]
+    # we're parsing the config directly here, much simpler
+    hidden = [ m for m in rs_config['members'] if m['hidden'] ]
+
+    module.exit_json(changed=False, primary=primary, secondary=secondary, hidden=hidden, config=rs_config)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/playbooks/library/mongodb_rs_status b/playbooks/library/mongodb_rs_status
new file mode 100644
index 00000000000..45e39437620
--- /dev/null
+++ b/playbooks/library/mongodb_rs_status
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+from six.moves import map
+DOCUMENTATION = """
+---
+module: mongodb_rs_status
+short_description: Get the status of a replica set of a mongo cluster.
+description:
+  - Get the status of the replica set of a mongo cluster. Provides the same info as rs.status() or replSetGetStatus.
+    Returns a status dictionary key containing the replica set JSON document from Mongo, or no status key if there
+    was no status found. This usually indicates that either Mongo was configured to run without replica sets or
+    that the replica set has not been initiated yet.
+version_added: "1.9"
+author:
+  - Feanil Patel
+  - Kevin Falcone
+options:
+  host:
+    description:
+      - The hostname or ip of a server in the mongo cluster.
+    required: false
+    default: 'localhost'
+  port:
+    description:
+      - The port to connect to mongo on.
+    required: false
+    default: 27017
+  username:
+    description:
+      - The username of the mongo user to connect as.
+    required: false
+  password:
+    description:
+      - The password to use when authenticating.
+    required: false
+  auth_database:
+    description:
+      - The database to authenticate against.
+ required: false +""" + +EXAMPLES = ''' +- name: Get status for the stage cluster + mongodb_rs_status: + host: localhost:27017 + username: root + password: password + register: mongo_status + +Note that you're testing for the presence of the status member of the dictionary not the contents of it + +- debug: msg="I don't have a replica set available" + when: mongo_status.status is not defined + +- debug: var=mongo_status.status + +''' +# Magic import +from ansible.module_utils.basic import * + +try: + from pymongo import MongoClient + from pymongo.errors import OperationFailure + from bson import json_util +except ImportError: + pymongo_found = False +else: + pymongo_found = True + +import json +from six.moves.urllib.parse import quote_plus + +def main(): + + arg_spec = dict( + host=dict(required=False, type='str', default="localhost"), + port=dict(required=False, type='int', default=27017), + username=dict(required=False, type='str'), + password=dict(required=False, type='str'), + auth_database=dict(required=False, type='str') + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False) + + if not pymongo_found: + module.fail_json(msg="The python pymongo module is not installed.") + + mongo_uri = 'mongodb://' + host = module.params.get('host') + port = module.params.get('port') + username = module.params.get('username') + password = module.params.get('password') + auth_database = module.params.get('auth_database') + + if (username and not password) or (password and not username): + module.fail_json(msg="Must provide both username and password or neither.") + + if username: + mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password]))) + + mongo_uri += "{}:{}".format(quote_plus(host),port) + + if auth_database: + mongo_uri += '/{}'.format(quote_plus(auth_database)) + + client = MongoClient(mongo_uri) + + # This checks to see if you have a replSetName configured + # This generally means that /etc/mongod.conf has been changed + # from the default to use a replica set and mongo has been + # restarted to use it. + + try: + repl_set = client.admin.command('getCmdLineOpts')['parsed']['replication']['replSetName'] + except (OperationFailure, KeyError): + module.exit_json(changed=False) + + # If mongo was started with a repl_set, it is safe to run replSetGetStatus + if repl_set: + status = client.admin.command("replSetGetStatus") + else: + module.exit_json(changed=False) + + # This converts the bson into a python dictionary that ansible's standard + # jsonify function can process and output without throwing errors on bson + # types that don't exist in JSON + clean = json.loads(json_util.dumps(status)) + + module.exit_json(changed=False, status=clean) + +if __name__ == '__main__': + main() diff --git a/playbooks/library/mongodb_step_down b/playbooks/library/mongodb_step_down new file mode 100644 index 00000000000..a531801b192 --- /dev/null +++ b/playbooks/library/mongodb_step_down @@ -0,0 +1,121 @@ +#!/usr/bin/env python + +from __future__ import absolute_import +from six.moves import map +from six.moves import range +DOCUMENTATION = """ +--- +module: mongodb_step_down +short_description: Issues a stepdown on the primary. +description: + - Issues replSetStepDown on the host provided in host:. + Afterwards, loops several times to ensure that a new primary is elected and that + it is a different host than the previous primary. Errors if the stepdown fails + or if the cluster fails to elect a new primary. 
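+  - The stepdown issued by this module is the equivalent of running
+    db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30}) in the
+    mongo shell; both timeout values are hard-coded in this module.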
+version_added: "1.9"
+author:
+  - Kevin Falcone
+options:
+  host:
+    description:
+      - The hostname or ip of a server in the mongo cluster.
+    required: false
+    default: 'localhost'
+  port:
+    description:
+      - The port to connect to mongo on.
+    required: false
+    default: 27017
+  username:
+    description:
+      - The username of the mongo user to connect as.
+    required: false
+  password:
+    description:
+      - The password to use when authenticating.
+    required: false
+  auth_database:
+    description:
+      - The database to authenticate against.
+    required: false
+"""
+
+EXAMPLES = '''
+- name: Step down the primary in the stage cluster
+  mongodb_step_down:
+    host: localhost:27017
+    username: root
+    password: password
+
+'''
+# Magic import
+from ansible.module_utils.basic import *
+
+try:
+    from pymongo import MongoClient
+    from pymongo.errors import AutoReconnect
+    from bson import json_util
+except ImportError:
+    pymongo_found = False
+else:
+    pymongo_found = True
+
+import json
+import time
+from six.moves.urllib.parse import quote_plus
+
+def main():
+
+    arg_spec = dict(
+        host=dict(required=False, type='str', default="localhost"),
+        port=dict(required=False, type='int', default=27017),
+        username=dict(required=False, type='str'),
+        password=dict(required=False, type='str'),
+        auth_database=dict(required=False, type='str')
+    )
+
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
+
+    if not pymongo_found:
+        module.fail_json(msg="The python pymongo module is not installed.")
+
+    mongo_uri = 'mongodb://'
+    host = module.params.get('host')
+    port = module.params.get('port')
+    username = module.params.get('username')
+    password = module.params.get('password')
+    auth_database = module.params.get('auth_database')
+
+    if (username and not password) or (password and not username):
+        module.fail_json(msg="Must provide both username and password or neither.")
+
+    if username:
+        mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password])))
+
+    mongo_uri += "{}:{}".format(quote_plus(host),port)
+
+    if auth_database:
+        mongo_uri += '/{}'.format(quote_plus(auth_database))
+
+    client = MongoClient(mongo_uri)
+
+    # This has no return since it forces a disconnect or throws an error
+    # about being unable to elect a secondary. We only catch the AutoReconnect
+    # so we see any other errors bubble up.
+    try:
+        client.admin.command("replSetStepDown",60,secondaryCatchUpPeriodSecs=30)
+    except AutoReconnect:
+        pass
+
+    for i in range(5):
+        status = client.admin.command("replSetGetStatus")
+        primary = [m for m in status['members'] if m['stateStr'] == 'PRIMARY']
+        # This won't work as well if you mix hostnames and IPs in your cluster.
+        # We use only IPs.
+        if primary and primary[0]['name'] != "{}:{}".format(quote_plus(host),port):
+            module.exit_json(changed=True, stepdown=True)
+        time.sleep(2)
+
+    module.fail_json(msg="Unable to step down {}".format(host))
+
+if __name__ == '__main__':
+    main()
diff --git a/playbooks/library/rds_local b/playbooks/library/rds_local
new file mode 100644
index 00000000000..e9f58276efc
--- /dev/null
+++ b/playbooks/library/rds_local
@@ -0,0 +1,1087 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+DOCUMENTATION = '''
+---
+module: rds
+version_added: "1.3"
+short_description: create, delete, or modify an Amazon rds instance
+description:
+  - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
+options:
+  command:
+    description:
+      - Specifies the action to take. The 'reboot' option is available starting at version 2.0
+    required: true
+    choices: [ 'create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore' ]
+  instance_name:
+    description:
+      - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
+    required: false
+    default: null
+  source_instance:
+    description:
+      - Name of the database to replicate. Used only when command=replicate.
+    required: false
+    default: null
+  db_engine:
+    description:
+      - The type of database. Used only when command=create.
+    required: false
+    default: null
+    choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
+  size:
+    description:
+      - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
+    required: false
+    default: null
+  instance_type:
+    description:
+      - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
+    required: false
+    default: null
+  username:
+    description:
+      - Master database username. Used only when command=create.
+    required: false
+    default: null
+  password:
+    description:
+      - Password for the master database username. Used only when command=create or command=modify.
+    required: false
+    default: null
+  region:
+    description:
+      - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+    required: true
+    aliases: [ 'aws_region', 'ec2_region' ]
+  db_name:
+    description:
+      - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
+    required: false
+    default: null
+  engine_version:
+    description:
+      - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
+    required: false
+    default: null
+  parameter_group:
+    description:
+      - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
+    required: false
+    default: null
+  license_model:
+    description:
+      - The license model for this DB instance. Used only when command=create or command=restore.
+ required: false + default: null + choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] + multi_zone: + description: + - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. + choices: [ "yes", "no" ] + required: false + default: null + iops: + description: + - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000. + required: false + default: null + security_groups: + description: + - Comma separated list of one or more security groups. Used only when command=create or command=modify. + required: false + default: null + vpc_security_groups: + description: + - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. + required: false + default: null + port: + description: + - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. + required: false + default: null + upgrade: + description: + - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. + required: false + default: no + choices: [ "yes", "no" ] + option_group: + description: + - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. + required: false + default: null + maint_window: + description: + - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify." + required: false + default: null + backup_window: + description: + - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. + required: false + default: null + backup_retention: + description: + - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." + required: false + default: null + zone: + description: + - availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore. + required: false + default: null + aliases: ['aws_zone', 'ec2_zone'] + subnet: + description: + - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. + required: false + default: null + snapshot: + description: + - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. + required: false + default: null + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. 
+    required: false
+    default: null
+    aliases: [ 'ec2_access_key', 'access_key' ]
+  wait:
+    description:
+      - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
+    required: false
+    default: "no"
+    choices: [ "yes", "no" ]
+  wait_timeout:
+    description:
+      - how long before wait gives up, in seconds
+    default: 300
+  apply_immediately:
+    description:
+      - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
+    default: no
+    choices: [ "yes", "no" ]
+  force_failover:
+    description:
+      - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
+    required: false
+    default: "no"
+    choices: [ "yes", "no" ]
+    version_added: "2.0"
+  new_instance_name:
+    description:
+      - Name to rename an instance to. Used only when command=modify.
+    required: false
+    default: null
+    version_added: "1.5"
+  character_set_name:
+    description:
+      - Associate the DB instance with a specified character set. Used with command=create.
+    required: false
+    default: null
+    version_added: "1.9"
+  publicly_accessible:
+    description:
+      - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
+    required: false
+    default: null
+    version_added: "1.9"
+  tags:
+    description:
+      - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
+    required: false
+    default: null
+    version_added: "1.9"
+requirements:
+  - "python >= 2.6"
+  - "boto"
+author:
+  - "Bruce Pennypacker (@bpennypacker)"
+  - "Will Thames (@willthames)"
+
+'''
+
+# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
+
+EXAMPLES = '''
+# Basic mysql provisioning example
+- rds:
+    command: create
+    instance_name: new-database
+    db_engine: MySQL
+    size: 10
+    instance_type: db.m1.small
+    username: mysql_admin
+    password: 1nsecure
+    tags:
+      Environment: testing
+      Application: cms
+
+# Create a read-only replica and wait for it to become available
+- rds:
+    command: replicate
+    instance_name: new-database-replica
+    source_instance: new_database
+    wait: yes
+    wait_timeout: 600
+
+# Delete an instance, but create a snapshot before doing so
+- rds:
+    command: delete
+    instance_name: new-database
+    snapshot: new_database_snapshot
+
+# Get facts about an instance
+- rds:
+    command: facts
+    instance_name: new-database
+  register: new_database_facts
+
+# Rename an instance and wait for the change to take effect
+- rds:
+    command: modify
+    instance_name: new-database
+    new_instance_name: renamed-database
+    wait: yes
+
+# Reboot an instance and wait for it to become available again
+- rds:
+    command: reboot
+    instance_name: database
+    wait: yes
+
+# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
+# then modify it to add your security group. Also, display the new endpoint.
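+# (Illustrative note: restore and modify are two separate steps here, presumably because
+# the restore command does not apply vpc_security_groups; the follow-up modify attaches them.)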
+# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
+- local_action:
+     module: rds
+     command: restore
+     snapshot: mypostgres-snapshot
+     instance_name: MyNewInstanceName
+     region: us-west-2
+     zone: us-west-2b
+     subnet: default-vpc-xx441xxx
+     publicly_accessible: yes
+     wait: yes
+     wait_timeout: 600
+     tags:
+         Name: pg1_test_name_tag
+  register: rds
+
+- local_action:
+     module: rds
+     command: modify
+     instance_name: MyNewInstanceName
+     region: us-west-2
+     vpc_security_groups: sg-xxx945xx
+
+- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}"
+
+'''
+
+import sys
+import time
+
+try:
+    import boto.rds
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+try:
+    import boto.rds2
+    has_rds2 = True
+except ImportError:
+    has_rds2 = False
+
+
+class RDSException(Exception):
+    def __init__(self, exc):
+        if hasattr(exc, 'error_message') and exc.error_message:
+            self.message = exc.error_message
+            self.code = exc.error_code
+        elif hasattr(exc, 'body') and 'Error' in exc.body:
+            self.message = exc.body['Error']['Message']
+            self.code = exc.body['Error']['Code']
+        else:
+            self.message = str(exc)
+            self.code = 'Unknown Error'
+
+
+class RDSConnection:
+    def __init__(self, module, region, **aws_connect_params):
+        try:
+            self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
+        except boto.exception.BotoServerError as e:
+            module.fail_json(msg=e.error_message)
+
+    def get_db_instance(self, instancename):
+        try:
+            return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
+        except boto.exception.BotoServerError as e:
+            return None
+
+    def get_db_snapshot(self, snapshotid):
+        try:
+            return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
+        except boto.exception.BotoServerError as e:
+            return None
+
+    def create_db_instance(self, instance_name, size, instance_class, db_engine,
+                           username, password, **params):
+        params['engine'] = db_engine
+        try:
+            result = self.connection.create_dbinstance(instance_name, size, instance_class,
+                                                       username, password, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
+        try:
+            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def delete_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.delete_dbinstance(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def delete_db_snapshot(self, snapshot):
+        try:
+            result = self.connection.delete_dbsnapshot(snapshot)
+            return RDSSnapshot(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def modify_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.modify_dbinstance(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def reboot_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.reboot_dbinstance(instance_name)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
+        try:
+            result =
self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def create_db_snapshot(self, snapshot, instance_name, **params): + try: + result = self.connection.create_dbsnapshot(snapshot, instance_name) + return RDSSnapshot(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def promote_read_replica(self, instance_name, **params): + try: + result = self.connection.promote_read_replica(instance_name, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + +class RDS2Connection: + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) + except boto.exception.BotoServerError as e: + module.fail_json(msg=e.error_message) + + def get_db_instance(self, instancename): + try: + dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] + result = RDS2DBInstance(dbinstances[0]) + return result + except boto.rds2.exceptions.DBInstanceNotFound as e: + return None + except Exception as e: + raise e + + def get_db_snapshot(self, snapshotid): + try: + snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] + result = RDS2Snapshot(snapshots[0]) + return result + except boto.rds2.exceptions.DBSnapshotNotFound as e: + return None + + def create_db_instance(self, instance_name, size, instance_class, db_engine, + username, password, **params): + try: + result = self.connection.create_db_instance(instance_name, size, instance_class, + db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def create_db_instance_read_replica(self, instance_name, source_instance, **params): + try: + result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def delete_db_instance(self, instance_name, **params): + try: + result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def delete_db_snapshot(self, snapshot): + try: + result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def modify_db_instance(self, instance_name, **params): + try: + result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_db_instance(instance_name, 
**params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): + try: + result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def create_db_snapshot(self, snapshot, instance_name, **params): + try: + result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def promote_read_replica(self, instance_name, **params): + try: + result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + +class RDSDBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + self.name = dbinstance.id + self.status = dbinstance.status + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.instance.create_time, + 'status' : self.status, + 'availability_zone' : self.instance.availability_zone, + 'backup_retention' : self.instance.backup_retention_period, + 'backup_window' : self.instance.preferred_backup_window, + 'maintenance_window' : self.instance.preferred_maintenance_window, + 'multi_zone' : self.instance.multi_az, + 'instance_type' : self.instance.instance_class, + 'username' : self.instance.master_username, + 'iops' : self.instance.iops + } + + # Endpoint exists only if the instance is available + if self.status == 'available': + d["endpoint"] = self.instance.endpoint[0] + d["port"] = self.instance.endpoint[1] + if self.instance.vpc_security_groups is not None: + d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups) + else: + d["vpc_security_groups"] = None + else: + d["endpoint"] = None + d["port"] = None + d["vpc_security_groups"] = None + + # ReadReplicaSourceDBInstanceIdentifier may or may not exist + try: + d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier + except Exception as e: + d["replication_source"] = None + return d + + + + +class RDS2DBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + if 'DBInstanceIdentifier' not in dbinstance: + self.name = None + else: + self.name = self.instance.get('DBInstanceIdentifier') + self.status = self.instance.get('DBInstanceStatus') + + def get_data(self): + d = { + 'id': self.name, + 'create_time': self.instance['InstanceCreateTime'], + 'status': self.status, + 'availability_zone': self.instance['AvailabilityZone'], + 'backup_retention': self.instance['BackupRetentionPeriod'], + 'maintenance_window': self.instance['PreferredMaintenanceWindow'], + 'multi_zone': self.instance['MultiAZ'], + 'instance_type': self.instance['DBInstanceClass'], + 'username': self.instance['MasterUsername'], + 'iops': self.instance['Iops'], + 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier'] + } + if self.instance["VpcSecurityGroups"] is not None: + d['vpc_security_groups'] = 
','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups']) + if self.status == 'available': + d['endpoint'] = self.instance["Endpoint"]["Address"] + d['port'] = self.instance["Endpoint"]["Port"] + else: + d['endpoint'] = None + d['port'] = None + + return d + + +class RDSSnapshot: + def __init__(self, snapshot): + self.snapshot = snapshot + self.name = snapshot.id + self.status = snapshot.status + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.snapshot.snapshot_create_time, + 'status' : self.status, + 'availability_zone' : self.snapshot.availability_zone, + 'instance_id' : self.snapshot.instance_id, + 'instance_created' : self.snapshot.instance_create_time, + } + # needs boto >= 2.21.0 + if hasattr(self.snapshot, 'snapshot_type'): + d["snapshot_type"] = self.snapshot.snapshot_type + if hasattr(self.snapshot, 'iops'): + d["iops"] = self.snapshot.iops + return d + + +class RDS2Snapshot: + def __init__(self, snapshot): + if 'DeleteDBSnapshotResponse' in snapshot: + self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + else: + self.snapshot = snapshot + self.name = self.snapshot.get('DBSnapshotIdentifier') + self.status = self.snapshot.get('Status') + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.snapshot['SnapshotCreateTime'], + 'status' : self.status, + 'availability_zone' : self.snapshot['AvailabilityZone'], + 'instance_id' : self.snapshot['DBInstanceIdentifier'], + 'instance_created' : self.snapshot['InstanceCreateTime'], + 'snapshot_type' : self.snapshot['SnapshotType'], + 'iops' : self.snapshot['Iops'], + } + return d + + +def await_resource(conn, resource, status, module): + wait_timeout = module.params.get('wait_timeout') + time.time() + while wait_timeout > time.time() and resource.status != status: + time.sleep(5) + if wait_timeout <= time.time(): + module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name) + if module.params.get('command') == 'snapshot': + # Temporary until all the rds2 commands have their responses parsed + if resource.name is None: + module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) + resource = conn.get_db_snapshot(resource.name) + else: + # Temporary until all the rds2 commands have their responses parsed + if resource.name is None: + module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) + resource = conn.get_db_instance(resource.name) + if resource is None: + break + return resource + + +def create_db_instance(module, conn): + subnet = module.params.get('subnet') + required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password'] + valid_vars = ['backup_retention', 'backup_window', + 'character_set_name', 'db_name', 'engine_version', + 'instance_type', 'iops', 'license_model', 'maint_window', + 'multi_zone', 'option_group', 'parameter_group','port', + 'subnet', 'upgrade', 'zone'] + if module.params.get('subnet'): + valid_vars.append('vpc_security_groups') + else: + valid_vars.append('security_groups') + if has_rds2: + valid_vars.extend(['publicly_accessible', 'tags']) + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if result: + changed = False + else: + try: + result = conn.create_db_instance(instance_name, module.params.get('size'), + module.params.get('instance_type'), module.params.get('db_engine'), + 
module.params.get('username'), module.params.get('password'), **params) + changed = True + except RDSException as e: + module.fail_json(msg="Failed to create instance: %s" % e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def replicate_db_instance(module, conn): + required_vars = ['instance_name', 'source_instance'] + valid_vars = ['instance_type', 'port', 'upgrade', 'zone'] + if has_rds2: + valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags']) + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + source_instance = module.params.get('source_instance') + + result = conn.get_db_instance(instance_name) + if result: + changed = False + else: + try: + result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) + changed = True + except RDSException as e: + module.fail_json(msg="Failed to create replica instance: %s " % e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def delete_db_instance_or_snapshot(module, conn): + required_vars = [] + valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + + if not instance_name: + result = conn.get_db_snapshot(snapshot) + else: + result = conn.get_db_instance(instance_name) + if not result: + module.exit_json(changed=False) + if result.status == 'deleting': + module.exit_json(changed=False) + try: + if instance_name: + if snapshot: + params["skip_final_snapshot"] = False + if has_rds2: + params["final_db_snapshot_identifier"] = snapshot + else: + params["final_snapshot_id"] = snapshot + else: + params["skip_final_snapshot"] = True + result = conn.delete_db_instance(instance_name, **params) + else: + result = conn.delete_db_snapshot(snapshot) + except RDSException as e: + module.fail_json(msg="Failed to delete instance: %s" % e.message) + + # If we're not waiting for a delete to complete then we're all done + # so just return + if not module.params.get('wait'): + module.exit_json(changed=True) + try: + resource = await_resource(conn, result, 'deleted', module) + module.exit_json(changed=True) + except RDSException as e: + if e.code == 'DBInstanceNotFound': + module.exit_json(changed=True) + else: + module.fail_json(msg=e.message) + except Exception as e: + module.fail_json(msg=str(e)) + + +def facts_db_instance_or_snapshot(module, conn): + required_vars = [] + valid_vars = ['instance_name', 'snapshot'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + + if instance_name and snapshot: + module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both") + if instance_name: + resource = conn.get_db_instance(instance_name) + if not resource: + module.fail_json(msg="DB instance %s does not exist" % instance_name) + if snapshot: + resource = conn.get_db_snapshot(snapshot) + if not resource: + module.fail_json(msg="DB snapshot %s does not exist" % snapshot) + + 
module.exit_json(changed=False, instance=resource.get_data()) + + +def modify_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['apply_immediately', 'backup_retention', 'backup_window', + 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', + 'maint_window', 'multi_zone', 'new_instance_name', + 'option_group', 'parameter_group', 'password', 'size', 'upgrade'] + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + new_instance_name = module.params.get('new_instance_name') + + try: + result = conn.modify_db_instance(instance_name, **params) + except RDSException as e: + module.fail_json(msg=e.message) + if params.get('apply_immediately'): + if new_instance_name: + # Wait until the new instance name is valid + new_instance = None + while not new_instance: + new_instance = conn.get_db_instance(new_instance_name) + time.sleep(5) + + # Found instance but it briefly flicks to available + # before rebooting so let's wait until we see it rebooting + # before we check whether to 'wait' + result = await_resource(conn, new_instance, 'rebooting', module) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + # guess that this changed the DB, need a way to check + module.exit_json(changed=True, instance=resource.get_data()) + + +def promote_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['backup_retention', 'backup_window'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if not result: + module.fail_json(msg="DB Instance %s does not exist" % instance_name) + + if result.get_data().get('replication_source'): + try: + result = conn.promote_read_replica(instance_name, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + else: + changed = False + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def snapshot_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['tags'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + changed = False + result = conn.get_db_snapshot(snapshot) + if not result: + try: + result = conn.create_db_snapshot(snapshot, instance_name, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_snapshot(snapshot) + + module.exit_json(changed=changed, snapshot=resource.get_data()) + + +def reboot_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = [] + + if has_rds2: + valid_vars.append('force_failover') + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + result = conn.get_db_instance(instance_name) + changed = False + try: + result = conn.reboot_db_instance(instance_name, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = 
await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def restore_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', + 'option_group', 'port', 'publicly_accessible', + 'subnet', 'tags', 'upgrade', 'zone'] + if has_rds2: + valid_vars.append('instance_type') + else: + required_vars.append('instance_type') + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + instance_type = module.params.get('instance_type') + snapshot = module.params.get('snapshot') + + changed = False + result = conn.get_db_instance(instance_name) + if not result: + try: + result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def validate_parameters(required_vars, valid_vars, module): + command = module.params.get('command') + for v in required_vars: + if not module.params.get(v): + module.fail_json(msg="Parameter %s required for %s command" % (v, command)) + + # map to convert rds module options to boto rds and rds2 options + optional_params = { + 'port': 'port', + 'db_name': 'db_name', + 'zone': 'availability_zone', + 'maint_window': 'preferred_maintenance_window', + 'backup_window': 'preferred_backup_window', + 'backup_retention': 'backup_retention_period', + 'multi_zone': 'multi_az', + 'engine_version': 'engine_version', + 'upgrade': 'auto_minor_version_upgrade', + 'subnet': 'db_subnet_group_name', + 'license_model': 'license_model', + 'option_group': 'option_group_name', + 'size': 'allocated_storage', + 'iops': 'iops', + 'new_instance_name': 'new_instance_id', + 'apply_immediately': 'apply_immediately', + } + # map to convert rds module options to boto rds options + optional_params_rds = { + 'db_engine': 'engine', + 'password': 'master_password', + 'parameter_group': 'param_group', + 'instance_type': 'instance_class', + } + # map to convert rds module options to boto rds2 options + optional_params_rds2 = { + 'tags': 'tags', + 'publicly_accessible': 'publicly_accessible', + 'parameter_group': 'db_parameter_group_name', + 'character_set_name': 'character_set_name', + 'instance_type': 'db_instance_class', + 'password': 'master_user_password', + 'new_instance_name': 'new_db_instance_identifier', + 'force_failover': 'force_failover', + } + if has_rds2: + optional_params.update(optional_params_rds2) + sec_group = 'db_security_groups' + else: + optional_params.update(optional_params_rds) + sec_group = 'security_groups' + # Check for options only supported with rds2 + for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()): + if module.params.get(k): + module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k) + + params = {} + for (k, v) in optional_params.items(): + if module.params.get(k) and k not in required_vars: + if k in valid_vars: + params[v] = module.params[k] + else: + module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command)) + + if module.params.get('security_groups'): + params[sec_group] = 
module.params.get('security_groups').split(',') + + vpc_groups = module.params.get('vpc_security_groups') + if vpc_groups: + if has_rds2: + params['vpc_security_group_ids'] = vpc_groups + else: + groups_list = [] + for x in vpc_groups: + groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) + params['vpc_security_groups'] = groups_list + + # Convert tags dict to list of tuples that rds2 expects + if 'tags' in params: + params['tags'] = list(module.params['tags'].items()) + return params + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), + instance_name = dict(required=False), + source_instance = dict(required=False), + db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), + size = dict(required=False), + instance_type = dict(aliases=['type'], required=False), + username = dict(required=False), + password = dict(no_log=True, required=False), + db_name = dict(required=False), + engine_version = dict(required=False), + parameter_group = dict(required=False), + license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False), + multi_zone = dict(type='bool', default=False), + iops = dict(required=False), + security_groups = dict(required=False), + vpc_security_groups = dict(type='list', required=False), + port = dict(required=False), + upgrade = dict(type='bool', default=False), + option_group = dict(required=False), + maint_window = dict(required=False), + backup_window = dict(required=False), + backup_retention = dict(required=False), + zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), + subnet = dict(required=False), + wait = dict(type='bool', default=False), + wait_timeout = dict(type='int', default=300), + snapshot = dict(required=False), + apply_immediately = dict(type='bool', default=False), + new_instance_name = dict(required=False), + tags = dict(type='dict', required=False), + publicly_accessible = dict(required=False), + character_set_name = dict(required=False), + force_failover = dict(type='bool', required=False, default=False) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + invocations = { + 'create': create_db_instance, + 'replicate': replicate_db_instance, + 'delete': delete_db_instance_or_snapshot, + 'facts': facts_db_instance_or_snapshot, + 'modify': modify_db_instance, + 'promote': promote_db_instance, + 'snapshot': snapshot_db_instance, + 'reboot': reboot_db_instance, + 'restore': restore_db_instance, + } + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="Region not specified. 
Unable to determine region from EC2_REGION.") + + # connect to the rds endpoint + if has_rds2: + conn = RDS2Connection(module, region, **aws_connect_params) + else: + conn = RDSConnection(module, region, **aws_connect_params) + + invocations[module.params.get('command')](module, conn) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/playbooks/library/supervisorctl_local b/playbooks/library/supervisorctl_local deleted file mode 100644 index aa4a52832bc..00000000000 --- a/playbooks/library/supervisorctl_local +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -import os - -DOCUMENTATION = ''' ---- -module: supervisorctl -short_description: Manage the state of a program or group of programs running via Supervisord -description: - - Manage the state of a program or group of programs running via I(Supervisord) -version_added: "0.7" -options: - name: - description: - - The name of the I(supervisord) program/process to manage - required: true - default: null - config: - description: - - configuration file path, passed as -c to supervisorctl - required: false - default: null - version_added: "1.3" - server_url: - description: - - URL on which supervisord server is listening, passed as -s to supervisorctl - required: false - default: null - version_added: "1.3" - username: - description: - - username to use for authentication with server, passed as -u to supervisorctl - required: false - default: null - version_added: "1.3" - password: - description: - - password to use for authentication with server, passed as -p to supervisorctl - required: false - default: null - version_added: "1.3" - state: - description: - - The state of service - required: true - default: null - choices: [ "present", "started", "stopped", "restarted" ] - supervisorctl_path: - description: - - Path to supervisorctl executable to use - required: false - default: null - version_added: "1.4" -requirements: - - supervisorctl -requirements: [ ] -author: Matt Wright -''' - -EXAMPLES = ''' -# Manage the state of program to be in 'started' state. -- supervisorctl: name=my_app state=started - -# Restart my_app, reading supervisorctl configuration from a specified file. -- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf - -# Restart my_app, connecting to supervisord with credentials and server URL. 
-- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001 - -''' - -def main(): - arg_spec = dict( - name=dict(required=True), - config=dict(required=False), - server_url=dict(required=False), - username=dict(required=False), - password=dict(required=False), - supervisorctl_path=dict(required=False), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped']) - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - state = module.params['state'] - config = module.params.get('config') - server_url = module.params.get('server_url') - username = module.params.get('username') - password = module.params.get('password') - supervisorctl_path = module.params.get('supervisorctl_path') - - if supervisorctl_path: - supervisorctl_path = os.path.expanduser(supervisorctl_path) - if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path): - supervisorctl_args = [ supervisorctl_path ] - else: - module.fail_json(msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path) - else: - supervisorctl_args = [ module.get_bin_path('supervisorctl', True) ] - - if config: - supervisorctl_args.extend(['-c', os.path.expanduser(config)]) - if server_url: - supervisorctl_args.extend(['-s', server_url]) - if username: - supervisorctl_args.extend(['-u', username]) - if password: - supervisorctl_args.extend(['-p', password]) - - def run_supervisorctl(cmd, name=None, **kwargs): - args = list(supervisorctl_args) # copy the master args - args.append(cmd) - if name: - args.append(name) - return module.run_command(args, **kwargs) - - rc, out, err = run_supervisorctl('status') - present = name in out - - if state == 'present': - if not present: - if module.check_mode: - module.exit_json(changed=True) - run_supervisorctl('reread', check_rc=True) - rc, out, err = run_supervisorctl('add', name) - - if '%s: added process group' % name in out: - module.exit_json(changed=True, name=name, state=state) - else: - module.fail_json(msg=out, name=name, state=state) - - module.exit_json(changed=False, name=name, state=state) - - rc, out, err = run_supervisorctl('status', name) - running = 'RUNNING' in out - - if running and state == 'started': - module.exit_json(changed=False, name=name, state=state) - - if running and state == 'stopped': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = run_supervisorctl('stop', name) - - if '%s: stopped' % name in out: - module.exit_json(changed=True, name=name, state=state) - - module.fail_json(msg=out) - - elif state == 'restarted': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = run_supervisorctl('update', name) - rc, out, err = run_supervisorctl('restart', name) - - if '%s: started' % name in out: - module.exit_json(changed=True, name=name, state=state) - - module.fail_json(msg=out) - - elif not running and state == 'started': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = run_supervisorctl('start',name) - - if '%s: started' % name in out: - module.exit_json(changed=True, name=name, state=state) - elif '%s: ERROR (already started)' % name in out: - # addresses a race condition if update is called - # immediately before started and the service is set - # to start automatically - module.exit_json(changed=False, name=name, state=state) - - module.fail_json(msg=out) - - module.exit_json(changed=False, name=name, state=state) - -# this 
is magic, see lib/ansible/module_common.py -#<> - -main() diff --git a/playbooks/library/util_map b/playbooks/library/util_map new file mode 100755 index 00000000000..ef5b60d2178 --- /dev/null +++ b/playbooks/library/util_map @@ -0,0 +1,185 @@ +#!/usr/bin/env python + +from __future__ import absolute_import +DOCUMENTATION = """ +--- +module: util_map +short_description: Applies a function to input and returns the result with the key function_output +description: + - Applies functions to data structures returning a new data structure. +version_added: "1.8" +author: Edward Zarecor +options: + function: + description: + - The function to apply, currently ['zip_to_dict','flatten'] + required: true + input: + description: + - The input + required: true + args: + description: + - Arguments to the function other than the input, varies by function. +""" + +EXAMPLES = ''' +- name: Apply function to results from ec2_scaling_policies + util_map: + function: 'zip_to_dict' + input: "{{ created_policies.results }}" + args: + - "name" + - "arn" + register: policy_data + +- name: Apply function to policy data + util_map: + function: 'flatten' + input: + - 'a' + - 'b' + - 'c' + - ['d','e','f'] + register: flat_list +''' + +from ansible.module_utils.basic import * +import ast +import itertools + +class ArgumentError(Exception): + pass + +def flatten(module, input): + """ + Takes an iterable and returns a flat list + + With the input of + + [['a','b','c'],'d',['e','f','g'],{'a','b'}] + + this function will return + + ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'a', 'b'] + + :param module: The ansible module + :param input: An iterable + :return: A flat list. + """ + + try: + flat = list(itertools.chain.from_iterable(input)) + except TypeError as te: + raise ArgumentError("Flatten resulted in a type error, {0}.".format(te.message)) + + module.exit_json(function_output = flat) + +def zip_to_dict(module, input, key_key, value_key): + """ + Takes an array of dicts and flattens it to a single dict by extracting the values + of a provided key as the keys in the new dict and the values of the second + provided key as the corresponding values. + + For example, the input dict of + + [{'name':'fred', 'id':'123'},{'name':'bill', 'id':'321'}] + + with an args array of ['id','name'] + + would return + + {'123':'fred','321':'bill'} + + :param input: an array of dicts, typically the results of an ansible module + :param key_key: a key into the input dict returning a value to be used as a key in the flattened dict + :param value_key: a key into the input dict returning a value to be used as a value in the flattened dict + :return: the flattened dict + """ + + results = {} + + for item in input: + results[item[key_key]]=item[value_key] + + module.exit_json(function_output = results) + +def zip_to_list(module, input, key): + """ + Takes an array of dicts and flattens it to a single list by extracting the value + of a provided key as an item in the new list. 
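+
+    (Note: every dict in the input is expected to contain the key; an item
+    missing the key raises a KeyError rather than being skipped.)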
+
+    For example, the input list of dicts like
+
+    [{'name':'fred', 'id':'123'},{'name':'bill', 'id':'321'}]
+
+    with an args array of ['name']
+
+    would return
+
+    ['fred','bill']
+
+    :param input: an array of dicts, typically the results of an ansible module
+    :param key: a key into the input dict returning a value to be used as an item in the flattened list
+    :return: the flattened list
+    """
+
+    results = []
+
+    for item in input:
+        results.append(item[key])
+
+    module.exit_json(function_output = results)
+
+def zip_to_listdict(module, input, key_name, value_name):
+    """
+    Take an array of dicts and build a list of dicts where the name of the key
+    is taken as a value of one of the keys in the input list and the value is
+    the value of a different key.
+
+    For example with an input like
+    [{'tag_name': 'Name', 'tag_value': 'stage-edx-edxapp'}, {'tag_name': 'cluster', 'tag_value': 'edxapp'}]
+
+    and an args array of ['tag_name', 'tag_value']
+
+    would return
+
+    [{'Name': 'stage-edx-edxapp'}, {'cluster': 'edxapp'}]
+
+    NB: This is used to work around the fact that we can't template out the names
+    of keys in dictionaries in ansible.
+    """
+
+    results = []
+    for item in input:
+        results.append({ item[key_name]: item[value_name]})
+
+    module.exit_json(function_output=results)
+
+def main():
+
+    arg_spec = dict(
+        function=dict(required=True, type='str'),
+        input=dict(required=True, type='str'),
+        args=dict(required=False, type='list'),
+    )
+
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
+
+    target = module.params.get('function')
+    # reify input data
+    input = ast.literal_eval(module.params.get('input'))
+    args = module.params.get('args')
+
+    if target == 'zip_to_dict':
+        zip_to_dict(module, input, *args)
+    elif target == 'flatten':
+        flatten(module, input)
+    elif target == 'zip_to_list':
+        zip_to_list(module, input, *args)
+    elif target == 'zip_to_listdict':
+        zip_to_listdict(module, input, *args)
+    else:
+        raise NotImplementedError("Function {0} is not implemented.".format(target))
+
+main()
diff --git a/playbooks/library/vpc_lookup b/playbooks/library/vpc_lookup
deleted file mode 100644
index 80151392eb3..00000000000
--- a/playbooks/library/vpc_lookup
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: vpc_lookup
-short_description: returns a list of subnet Ids using tags as criteria
-description:
-    - Returns a list of subnet Ids for a given set of tags that identify one or more VPCs
-version_added: "1.5"
-options:
-  region:
-    description:
-      - The AWS region to use. Must be specified if ec2_url
-        is not used. If not specified then the value of the
-        EC2_REGION environment variable, if any, is used.
-    required: false
-    default: null
-    aliases: [ 'aws_region', 'ec2_region' ]
-  aws_secret_key:
-    description:
-      - AWS secret key. If not set then the value of
-        the AWS_SECRET_KEY environment variable is used.
- required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the - AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - tags: - desription: - - tags to lookup - required: false - default: null - type: dict - aliases: [] - -requirements: [ "boto" ] -author: John Jarvis -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Return all instances that match the tag "Name: foo" -- local_action: - module: vpc_lookup - tags: - Name: foo -''' - -import sys - -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - -try: - from boto.vpc import VPCConnection - from boto.vpc import connect_to_region -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def main(): - - module=AnsibleModule( - argument_spec=dict( - region=dict(choices=AWS_REGIONS), - aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], - no_log=True), - aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), - tags=dict(default=None, type='dict'), - ) - ) - - tags = module.params.get('tags') - aws_secret_key = module.params.get('aws_secret_key') - aws_access_key = module.params.get('aws_access_key') - region = module.params.get('region') - - # If we have a region specified, connect to its endpoint. - if region: - try: - vpc = connect_to_region(region, aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - else: - module.fail_json(msg="region must be specified") - vpc_conn = VPCConnection() - subnet_ids = [] - for subnet in vpc_conn.get_all_subnets(filters={'tag:' + tag: value - for tag, value in tags.iteritems()}): - subnet_ids.append(subnet.id) - vpc_ids = [] - for vpc in vpc.get_all_vpcs(filters={'tag:' + tag: value - for tag, value in tags.iteritems()}): - vpc_ids.append(vpc.id) - - module.exit_json(changed=False, subnet_ids=subnet_ids, vpc_ids=vpc_ids) - - -# this is magic, see lib/ansible/module_common.py -#<> - -main() diff --git a/playbooks/library/wait_for b/playbooks/library/wait_for deleted file mode 100644 index 8644673eb50..00000000000 --- a/playbooks/library/wait_for +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import socket -import datetime -import time -import sys -import re - -DOCUMENTATION = ''' ---- -module: wait_for -short_description: Waits for a condition before continuing. 
-description: - - Waiting for a port to become available is useful for when services - are not immediately available after their init scripts return - - which is true of certain Java application servers. It is also - useful when starting guests with the M(virt) module and - needing to pause until they are ready. This module can - also be used to wait for a file to be available on the filesystem - or with a regex match a string to be present in a file. -version_added: "0.7" -options: - host: - description: - - hostname or IP address to wait for - required: false - default: "127.0.0.1" - aliases: [] - timeout: - description: - - maximum number of seconds to wait for - required: false - default: 300 - delay: - description: - - number of seconds to wait before starting to poll - required: false - default: 0 - port: - description: - - port number to poll - required: false - state: - description: - - either C(present), C(started), or C(stopped) - - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed - - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing - choices: [ "present", "started", "stopped" ] - default: "started" - path: - version_added: "1.4" - required: false - description: - - path to a file on the filesytem that must exist before continuing - search_regex: - version_added: "1.4" - required: false - description: - - with the path option can be used match a string in the file that must match before continuing. Defaults to a multiline regex. - -notes: [] -requirements: [] -author: Jeroen Hoekx, John Jarvis -''' - -EXAMPLES = ''' - -# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds -- wait_for: port=8000 delay=10" - -# wait until the file /tmp/foo is present before continuing -- wait_for: path=/tmp/foo - -# wait until the string "completed" is in the file /tmp/foo before continuing -- wait_for: path=/tmp/foo search_regex=completed - -''' - -def main(): - - module = AnsibleModule( - argument_spec = dict( - host=dict(default='127.0.0.1'), - timeout=dict(default=300), - connect_timeout=dict(default=5), - delay=dict(default=0), - port=dict(default=None), - path=dict(default=None), - search_regex=dict(default=None), - state=dict(default='started', choices=['started', 'stopped', 'present']), - ), - ) - - params = module.params - - host = params['host'] - timeout = int(params['timeout']) - connect_timeout = int(params['connect_timeout']) - delay = int(params['delay']) - if params['port']: - port = int(params['port']) - else: - port = None - state = params['state'] - path = params['path'] - search_regex = params['search_regex'] - - if port and path: - module.fail_json(msg="port and path parameter can not both be passed to wait_for") - if path and state == 'stopped': - module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") - - start = datetime.datetime.now() - - if delay: - time.sleep(delay) - - if state == 'stopped': - ### first wait for the stop condition - end = start + datetime.timedelta(seconds=timeout) - - while datetime.datetime.now() < end: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - s.shutdown(socket.SHUT_RDWR) - s.close() - time.sleep(1) - except: - break - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Timeout when waiting for %s:%s to stop." 
% (host, port), elapsed=elapsed.seconds)
-
-    elif state in ['started', 'present']:
-        ### wait for start condition
-        end = start + datetime.timedelta(seconds=timeout)
-        while datetime.datetime.now() < end:
-            if path:
-                try:
-                    with open(path) as f:
-                        if search_regex:
-                            if re.search(search_regex, f.read(), re.MULTILINE):
-                                break
-                            else:
-                                time.sleep(1)
-                        else:
-                            break
-                except IOError:
-                    time.sleep(1)
-                    pass
-            elif port:
-                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-                s.settimeout(connect_timeout)
-                try:
-                    s.connect( (host, port) )
-                    s.shutdown(socket.SHUT_RDWR)
-                    s.close()
-                    break
-                except:
-                    time.sleep(1)
-                    pass
-        else:
-            elapsed = datetime.datetime.now() - start
-            if port:
-                module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
-            elif path:
-                if search_regex:
-                    module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
-                else:
-                    module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
-
-
-    elapsed = datetime.datetime.now() - start
-    module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)
-
-# this is magic, see lib/ansible/module_common.py
-#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
-main()
diff --git a/playbooks/library_authoring.yml b/playbooks/library_authoring.yml
new file mode 100644
index 00000000000..b5881bb25c0
--- /dev/null
+++ b/playbooks/library_authoring.yml
@@ -0,0 +1,16 @@
+- name: Deploy the Library Authoring MFE
+  hosts: all
+  become: True
+  gather_facts: True
+  vars:
+    ENABLE_NEWRELIC: False
+    CLUSTER_NAME: 'library-authoring'
+    LIBRARY_AUTHORING_ENABLED: True
+    LIBRARY_AUTHORING_SANDBOX_BUILD: False
+  roles:
+    - role: library_authoring
+      MFE_NAME: library-authoring
+    - role: splunkforwarder
+      when: COMMON_ENABLE_SPLUNKFORWARDER
+    - role: newrelic_infrastructure
+      when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE
\ No newline at end of file
diff --git a/playbooks/license_manager.yml b/playbooks/license_manager.yml
new file mode 100644
index 00000000000..691ef7ccbe4
--- /dev/null
+++ b/playbooks/license_manager.yml
@@ -0,0 +1,22 @@
+- name: Deploy edX License Manager
+  hosts: all
+  become: True
+  gather_facts: True
+  vars:
+    ENABLE_NEWRELIC: True
+    CLUSTER_NAME: 'license_manager'
+    REGISTRAR_ENABLED: True
+  roles:
+    - role: aws
+      when: COMMON_ENABLE_AWS_ROLE
+    - role: nginx
+      nginx_default_sites:
+        - license_manager
+    - license_manager
+    - role: splunkforwarder
+      when: COMMON_ENABLE_SPLUNKFORWARDER
+    - role: newrelic_infrastructure
+      when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE
+    - role: hermes
+      HERMES_TARGET_SERVICE: 'license_manager'
+      when: REGISTRAR_HERMES_ENABLED
diff --git a/playbooks/lifecycle_inventory.py b/playbooks/lifecycle_inventory.py
new file mode 100755
index 00000000000..330f5d8b0e5
--- /dev/null
+++ b/playbooks/lifecycle_inventory.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+"""
+Build an ansible inventory based on autoscaling group instance lifecycle state.
+
+Outputs JSON to stdout with keys for each state and combination of autoscaling
+group and state.
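+
+Example usage as a dynamic inventory source (the playbook name here is
+illustrative):
+
+  ansible-playbook -i lifecycle_inventory.py deploy.yml --limit InService
+
+Sample output: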
+
+{
+  "InService": [
+    "10.0.47.127",
+    "10.0.46.174"
+  ],
+  "Terminating_Wait": [
+    "10.0.48.104"
+  ],
+  "e-d-CommonClusterServerAsGroup": [
+    "10.0.47.127",
+    "10.0.46.174"
+  ],
+  "e-d-CommonClusterServerAsGroup_InService": [
+    "10.0.47.127",
+    "10.0.46.174"
+  ],
+  "e-d-CommonClusterServerAsGroup_Terminating_Wait": [
+    "10.0.48.104"
+  ]
+
+}
+"""
+import argparse
+import boto3
+import json
+from collections import defaultdict
+from os import environ
+
+class LifecycleInventory():
+
+    def __init__(self, region):
+        parser = argparse.ArgumentParser()
+        self.region = region
+
+    def get_e_d_from_tags(self, group):
+
+        environment = "default_environment"
+        deployment = "default_deployment"
+
+        for r in group['Tags']:
+            if r['Key'] == "environment":
+                environment = r['Value']
+            elif r['Key'] == "deployment":
+                deployment = r['Value']
+        return environment,deployment
+
+    def get_instance_dict(self):
+        ec2 = boto3.client('ec2', region_name=self.region)
+        reservations = ec2.describe_instances()['Reservations']
+
+        dict = {}
+
+        for instance in [i for r in reservations for i in r['Instances']]:
+            dict[instance['InstanceId']] = instance
+
+        return dict
+
+    def get_asgs(self):
+        asg = boto3.client('autoscaling', region_name=self.region)
+        asg_request = asg.describe_auto_scaling_groups()
+        asg_accumulator = asg_request['AutoScalingGroups']
+
+        while 'NextToken' in asg_request:
+            asg_request = asg.describe_auto_scaling_groups(NextToken=asg_request['NextToken'])
+            asg_accumulator.extend(asg_request['AutoScalingGroups'])
+
+        return asg_accumulator
+
+    def run(self):
+
+        groups = self.get_asgs()
+
+        instances = self.get_instance_dict()
+        inventory = defaultdict(list)
+
+        for group in groups:
+
+            for instance in group['Instances']:
+
+                private_ip_address = instances[instance['InstanceId']]['PrivateIpAddress']
+                if private_ip_address:
+                    environment,deployment = self.get_e_d_from_tags(group)
+                    inventory[environment + "_" + deployment + "_" + instance['LifecycleState'].replace(":","_")].append(private_ip_address)
+                    inventory[group['AutoScalingGroupName']].append(private_ip_address)
+                    inventory[group['AutoScalingGroupName'] + "_" + instance['LifecycleState'].replace(":","_")].append(private_ip_address)
+                    inventory[instance['LifecycleState'].replace(":","_")].append(private_ip_address)
+
+        print(json.dumps(inventory, sort_keys=True, indent=2))
+
+if __name__=="__main__":
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-r', '--region', help='The aws region to use when connecting.', default=environ.get('AWS_REGION', 'us-east-1'))
+    parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
+    args = parser.parse_args()
+
+
+    LifecycleInventory(args.region).run()
diff --git a/playbooks/log_server.yml b/playbooks/log_server.yml
new file mode 100644
index 00000000000..0e7fff48d92
--- /dev/null
+++ b/playbooks/log_server.yml
@@ -0,0 +1,19 @@
+---
+# Build a kibana/logstash/elasticsearch server for capturing and
+# analyzing logs.
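+#
+# Example invocation (the target hostname below is illustrative):
+#
+#   ansible-playbook -i log-server.example.com, log_server.yml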
+- name: Configure syslog server + hosts: all + become: True + roles: + - common + - oraclejdk + - elasticsearch + - logstash + - kibana + - role: nginx + nginx_sites: + - kibana diff --git a/playbooks/manage_edxapp_users_and_groups.yml b/playbooks/manage_edxapp_users_and_groups.yml new file mode 100644 index 00000000000..22b3b1d21e8 --- /dev/null +++ b/playbooks/manage_edxapp_users_and_groups.yml @@ -0,0 +1,285 @@ +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Usage: ansible-playbook -i edxapp-host-1, -e@/path/to/configfile-of-users-andor-groups +# -e 'group_environment=prod-edge' +# Overview: +# This playbook ensures that the specified users and groups exist in the targeted +# edxapp cluster. +# +# Users have the following properties: +# - username (required, str) +# - email (required, str) +# - initial_password_hash (optional, str) +# - remove (optional, bool): ensures the user does not exist +# - staff (optional, bool) +# - superuser (optional, bool) +# - unusable_password (optional, bool): ensures the password is unusable +# - groups (optional, list[str]) +# - _comment (optional, str): ignored +# +# Groups can have the following properties: +# - name (required, str) +# - permissions (required, list[str]) +# - remove (optional, bool): ensures the group does not exist +# - _comment (optional, str): ignored +# +# Example: +# +# django_users: +# +# - username: bobby +# email: bobby@droptabl.es +# staff: true +# superuser: true +# groups: +# - group1 +# - group2 +# +# - username: fred +# email: fred@smith +# remove: true +# +# - username: smitty +# email: smitty@werbenmanjens.en +# groups: +# - group1 +# _comment: | +# he was +# number one! +# +# - username: frank +# email: frank@bigcorp.com +# staff: false +# superuser: false +# unusable_password: true +# groups: [] +# +# - username: zoe +# email: zoe@example.com +# initial_password_hash: 'pbkdf2_sha256$20000$levJ6jdVYCsu$gdBLGf2DNPqfaKdcETXtFocRU8Kk+sMsIvKkmw1dKbY=' +# +# django_groups: +# +# - name: group1 +# permissions: +# - permission1 +# - permission2 +# +# - name: group3 +# remove: true +# permissions: [] +# _comment: | +# group3 is the best group +# yada yada +# +# Note: +# +# LMS and CMS do still share the edxapp database, and therefore share a +# User table and Group table. So, edxapp users are created in an LMS context +# but exist in CMS as well. +# +# However, some edxapp Django apps are only installed into CMS (but not LMS), +# and vice versa. In order to create a group and grant it permissions, +# the permissions must be from apps that are installed into the running +# service. So, to create groups for LMS-only apps, we must create groups +# in an LMS context, and to create groups for CMS-only apps, we must create +# groups in a CMS context. Thus, while users are managed jointly for LMS/CMS, +# groups are managed separately. +# +# That being said, note that the groups created in one service variant should be +# disjoint with those created the other, as the underlying Group table is shared. +# That is, each group name should be defined for LMS *or* CMS, not both. +# Otherwise, whichever group is created second will override the first one. +# +# Of course, the jointly-managed LMS/CMS users can be assigned to any combination +# of both LMS and CMS groups. 
Assigning users to CMS groups does in fact work through +# an LMS context, since the actual CMS permissions are not being referenced. +# +# Note: to get a list of all available permissions, run the following code within a Django shell: +# +# from django.contrib.auth.models import Permission +# for perm in Permission.objects.all(): +# print '{}:{}:{}'.format(perm.content_type.app_label, perm.content_type.model, perm.codename) +# +- hosts: all + vars: + env_path: /edx/app/edxapp/edxapp_env + python_path: /edx/app/edxapp/venvs/edxapp/bin/python + manage_path: /edx/bin/manage.edxapp + ignore_user_creation_errors: no + deployment_settings: "{{ EDXAPP_SETTINGS | default('production') }}" + group_environment: "" # By default, create groups for all envs (for backwards compatibility). + service: "" # Used to display the service name during execution with *-ida tags. Set using ansible-playbook -e. + vars_files: + - roles/common_vars/defaults/main.yml + tasks: + - name: Manage LMS groups + tags: + - manage-groups-lms + - manage-groups # Old tag pre-lms/cms-group-split, can be removed after TNL-8274. + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} + manage_group {{ item.name | quote }} + {% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + with_items: "{{ django_groups }}" + when: (not group_environment) or group_environment in item.environments + become: true + become_user: "{{ common_web_user }}" + + - name: Manage CMS groups + tags: + - manage-groups-cms + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} cms --settings={{ deployment_settings }} + manage_group {{ item.name | quote }} + {% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + with_items: "{{ django_groups }}" + when: (not group_environment) or group_environment in item.environments + become: true + become_user: "{{ common_web_user }}" + + - name: Manage recent LMS/CMS users + tags: + - manage-recent-users-edxapp + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} + manage_user {{ item.username | quote }} {{ item.email | quote }} + {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + {% if item.get('superuser') %}--superuser{% endif %} + {% if item.get('staff') %}--staff{% endif %} + {% if item.get('unusable_password') %}--unusable-password{% endif %} + {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} + with_items: "{{ django_users }}" + register: manage_users_result + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) + retries: 3 + until: manage_users_result is not failed + become: true + become_user: "{{ common_web_user }}" + + + - name: Manage active LMS/CMS users + tags: + - manage-active-users-edxapp + shell: > + . 
{{env_path}} && {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} + manage_user {{ item.username | quote }} {{ item.email | quote }} + {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + {% if item.get('superuser') %}--superuser{% endif %} + {% if item.get('staff') %}--staff{% endif %} + {% if item.get('unusable_password') %}--unusable-password{% endif %} + {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} + with_items: "{{ django_users }}" + when: not item.get('unusable_password') + register: manage_users_result + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) + retries: 3 + until: manage_users_result is not failed + become: true + become_user: "{{ common_web_user }}" + + - name: Manage inactive LMS/CMS users + tags: + - manage-inactive-users-edxapp + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} + manage_user {{ item.username | quote }} {{ item.email | quote }} + {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + {% if item.get('superuser') %}--superuser{% endif %} + {% if item.get('staff') %}--staff{% endif %} + {% if item.get('unusable_password') %}--unusable-password{% endif %} + {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} + with_items: "{{ django_users }}" + when: item.get('unusable_password') + register: manage_users_result + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) + retries: 3 + until: manage_users_result is not failed + become: true + become_user: "{{ common_web_user }}" + + - name: Manage {{ service }} groups + tags: + - manage-groups-ida + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} + manage_group {{ item.name | quote }} + {% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + with_items: "{{ django_groups }}" + when: (not group_environment) or group_environment in item.environments + become: true + become_user: "{{ common_web_user }}" + + - name: Manage recent {{ service }} users + tags: + - manage-recent-users-ida + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} + manage_user {{ item.username | quote }} {{ item.email | quote }} + {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + {% if item.get('superuser') %}--superuser{% endif %} + {% if item.get('staff') %}--staff{% endif %} + {% if item.get('unusable_password') %}--unusable-password{% endif %} + {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} + with_items: "{{ django_users }}" + register: manage_users_result + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) + retries: 3 + until: manage_users_result is not failed + become: true + become_user: "{{ common_web_user }}" + + - name: Manage active {{ service }} users + tags: + - manage-active-users-ida + shell: > + . 
{{env_path}} && {{ python_path }} {{ manage_path }} + manage_user {{ item.username | quote }} {{ item.email | quote }} + {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + {% if item.get('superuser') %}--superuser{% endif %} + {% if item.get('staff') %}--staff{% endif %} + {% if item.get('unusable_password') %}--unusable-password{% endif %} + {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} + with_items: "{{ django_users }}" + when: not item.get('unusable_password') + register: manage_users_result + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) + retries: 3 + until: manage_users_result is not failed + become: true + become_user: "{{ common_web_user }}" + + - name: Manage inactive {{ service }} users + tags: + - manage-inactive-users-ida + shell: > + . {{env_path}} && {{ python_path }} {{ manage_path }} + manage_user {{ item.username | quote }} {{ item.email | quote }} + {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} + {% if item.get('remove') %}--remove{% endif %} + {% if item.get('superuser') %}--superuser{% endif %} + {% if item.get('staff') %}--staff{% endif %} + {% if item.get('unusable_password') %}--unusable-password{% endif %} + {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} + with_items: "{{ django_users }}" + when: item.get('unusable_password') + register: manage_users_result + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) + retries: 3 + until: manage_users_result is not failed + become: true + become_user: "{{ common_web_user }}" diff --git a/playbooks/masters_sandbox.yml b/playbooks/masters_sandbox.yml new file mode 100644 index 00000000000..e658196934a --- /dev/null +++ b/playbooks/masters_sandbox.yml @@ -0,0 +1,79 @@ +# Creates initial state for a master's integration environment sandbox + +- name: setup master's integration environment + hosts: all + become: True + gather_facts: True + vars: + - edxapp_env_path: /edx/app/edxapp/edxapp_env + - edxapp_venv_dir: /edx/app/edxapp/venvs/edxapp + - edxapp_code_dir: /edx/app/edxapp/edx-platform + - registrar_env_path: /edx/app/registrar/registrar_env + - registrar_venv_dir: /edx/app/registrar/venvs/registrar + - registrar_code_dir: /edx/app/registrar/registrar + - lms_config_file: /edx/etc/lms.yml + - registrar_config_file: /edx/etc/registrar.yml + - jwt_signature_file: /tmp/lms_jwt_signature.yml + + tasks: + - name: create lms user + shell: . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms manage_user {{username}} {{email}} + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create dot application + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_dot_application "master's api client" {{username}} + --client-id {{organization_key}}-api-client-id --scopes=user_id + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create api access request + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_api_access_request {{username}} + --create-config --disconnect-signals + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create discovery site configuration + shell: > + . 
{{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_or_update_site_configuration {{dns_name}}.sandbox.edx.org + --configuration '{"COURSE_CATALOG_API_URL":"https://discovery-{{dns_name}}.sandbox.edx.org/api/v1","email_from_address":"edX "}' + --enabled + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create LMS catalog integration + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_catalog_integrations --enabled --internal_api_url + https://discovery-{{dns_name}}.sandbox.edx.org --service_username discovery_worker --page_size 20 + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create LMS organization + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms + add_organization {{organization_key}} {{organization_key}} + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create registrar organization + shell: > + . {{ registrar_env_path }} && {{ registrar_venv_dir }}/bin/python manage.py create_organization {{organization_key}} + --group {{registrar_role}} + args: + chdir: "{{ registrar_code_dir }}" + + - name: create registrar user + shell: > + . {{ registrar_env_path }} && {{ registrar_venv_dir }}/bin/python manage.py create_user {{username}} --email {{email}} + --groups {{organization_key}}_{{registrar_role}} + args: + chdir: "{{ registrar_code_dir }}" + + - name: set up cron job to refresh lms cache + cron: + name: "refresh masters sandbox cache" + job: ". {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python {{ edxapp_code_dir }}/manage.py lms cache_programs" + hour: "0" + minute: "0" diff --git a/playbooks/masters_sandbox_update.yml b/playbooks/masters_sandbox_update.yml new file mode 100644 index 00000000000..5a471ecc88c --- /dev/null +++ b/playbooks/masters_sandbox_update.yml @@ -0,0 +1,59 @@ +# Updates master's sandbox environment with production data + +- name: update master's integration environment + hosts: all + become: True + gather_facts: True + vars: + - edxapp_env_path: /edx/app/edxapp/edxapp_env + - edxapp_venv_dir: /edx/app/edxapp/venvs/edxapp + - edxapp_code_dir: /edx/app/edxapp/edx-platform + - discovery_env_path: /edx/app/discovery/discovery_env + - discovery_venv_dir: /edx/app/discovery/venvs/discovery + - discovery_code_dir: /edx/app/discovery/discovery + - registrar_env_path: /edx/app/registrar/registrar_env + - registrar_venv_dir: /edx/app/registrar/venvs/registrar + - registrar_code_dir: /edx/app/registrar/registrar + - prod_catalog_host: https://discovery.edx.org + - prod_oauth_host: https://courses.edx.org + - instructor_username: staff@example.com + + tasks: + - name: setup edx partner + shell: > + . {{ discovery_env_path }} && {{ discovery_venv_dir }}/bin/python manage.py + create_or_update_partner --site-domain discovery-{{dns_name}}.sandbox.edx.org --code edx --name edX + args: + chdir: "{{ discovery_code_dir }}" + + - name: pull production discovery data + shell: > + . {{ discovery_env_path }} && {{ discovery_venv_dir }}/bin/python manage.py + load_program_fixture {{ program_uuids }} + --catalog-host {{ prod_catalog_host }} + --oauth-host {{ prod_oauth_host }} + --client-id {{ client_id }} + --client-secret {{ client_secret }} + args: + chdir: "{{ discovery_code_dir }}" + + - name: update LMS program cache + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms + cache_programs + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create course shells in LMS + shell: > + . 
{{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py cms + sync_courses {{ instructor_username }} + args: + chdir: "{{ edxapp_code_dir }}" + + - name: load discovery programs into registrar + shell: > + . {{ registrar_env_path }} && {{ registrar_venv_dir }}/bin/python manage.py + manage_programs {{ program_uuids }} + args: + chdir: "{{ registrar_code_dir }}" diff --git a/playbooks/mfe_flags_setup.yml b/playbooks/mfe_flags_setup.yml new file mode 100644 index 00000000000..36825eaf24b --- /dev/null +++ b/playbooks/mfe_flags_setup.yml @@ -0,0 +1,11 @@ +--- + +- name: Setup required MFE waffle flags + hosts: all + become: True + gather_facts: True + vars_files: + - "roles/common_vars/defaults/main.yml" + - "roles/edxapp/defaults/main.yml" + roles: + - role: mfe_flags_setup diff --git a/playbooks/minos.yml b/playbooks/minos.yml new file mode 100644 index 00000000000..eca799dfe65 --- /dev/null +++ b/playbooks/minos.yml @@ -0,0 +1,11 @@ +- name: Deploy minos + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - minos diff --git a/playbooks/mongo.yml b/playbooks/mongo.yml new file mode 100644 index 00000000000..3fed6ec11ab --- /dev/null +++ b/playbooks/mongo.yml @@ -0,0 +1,17 @@ +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/mongo_3_0.yml b/playbooks/mongo_3_0.yml new file mode 100644 index 00000000000..dca352b65f9 --- /dev/null +++ b/playbooks/mongo_3_0.yml @@ -0,0 +1,31 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_3_0 role. +# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_3_0.yml -i 10.1.1.1,10.2.2.2,10.3.3.3 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_3_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_3_0.yml -i 10.1.1.1,10.2.2.2,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - enhanced_networking + - mongo_3_0 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/mongo_3_2.yml b/playbooks/mongo_3_2.yml new file mode 100644 index 00000000000..c22c43ebae1 --- /dev/null +++ b/playbooks/mongo_3_2.yml @@ -0,0 +1,36 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_3_2 role. 
+# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_3_2.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_3_2.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_3_2.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_3_2 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/mongo_3_4.yml b/playbooks/mongo_3_4.yml new file mode 100644 index 00000000000..139f46931ea --- /dev/null +++ b/playbooks/mongo_3_4.yml @@ -0,0 +1,35 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_3_4 role. +# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_3_4.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_3_4.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_3_4.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_3_4 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_3_6.yml b/playbooks/mongo_3_6.yml new file mode 100644 index 00000000000..5056d2789de --- /dev/null +++ b/playbooks/mongo_3_6.yml @@ -0,0 +1,35 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_3_6 role. 
+# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_3_6.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_3_6.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_3_6.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_3_6 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_4_0.yml b/playbooks/mongo_4_0.yml new file mode 100644 index 00000000000..42f7e6ac9b1 --- /dev/null +++ b/playbooks/mongo_4_0.yml @@ -0,0 +1,35 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_4_0 role. +# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_4_0.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_4_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_4_0.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_4_0 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_4_2.yml b/playbooks/mongo_4_2.yml new file mode 100644 index 00000000000..50e75177f24 --- /dev/null +++ b/playbooks/mongo_4_2.yml @@ -0,0 +1,29 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_4_2 role. 
+# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_4_2.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_4_2.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_4_2.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_4_2 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_4_4.yml b/playbooks/mongo_4_4.yml new file mode 100644 index 00000000000..b11f769fccd --- /dev/null +++ b/playbooks/mongo_4_4.yml @@ -0,0 +1,29 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_4_4 role. +# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_4_4.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_4_4.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_4_4.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_4_4 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_5_0.yml b/playbooks/mongo_5_0.yml new file mode 100644 index 00000000000..24a7a20b06b --- /dev/null +++ b/playbooks/mongo_5_0.yml @@ -0,0 +1,29 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_5_0 role. 
+# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_5_0.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_5_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_5_0.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_5_0 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_6_0.yml b/playbooks/mongo_6_0.yml new file mode 100644 index 00000000000..6799b7ec7bc --- /dev/null +++ b/playbooks/mongo_6_0.yml @@ -0,0 +1,29 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_6_0 role. +# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_6_0.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_6_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_6_0.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_6_0 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_7_0.yml b/playbooks/mongo_7_0.yml new file mode 100644 index 00000000000..1fd4c6aa8e9 --- /dev/null +++ b/playbooks/mongo_7_0.yml @@ -0,0 +1,29 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_7_0 role. 
+# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_7_0.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_7_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_7_0.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_7_0 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_mms.yml b/playbooks/mongo_mms.yml new file mode 100644 index 00000000000..f22c031ebbe --- /dev/null +++ b/playbooks/mongo_mms.yml @@ -0,0 +1,15 @@ +- name: Deploy mongo_mms instance + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_mms + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_rolling_upgrade.yml b/playbooks/mongo_rolling_upgrade.yml new file mode 100644 index 00000000000..d62b77ba95c --- /dev/null +++ b/playbooks/mongo_rolling_upgrade.yml @@ -0,0 +1,155 @@ +# Upgrades a full mongo cluster, starting with the hidden members, then the +# secondary, and finally steps down the primary and upgrades it. It checks along +# the way for a healthy cluster, failing if that is not true. +# +# This play expects to have access to a config file where MONGO_RS_CONFIG, as described +# in the mongo_3_0 role, is defined, as well as MONGO_ADMIN_USER and MONGO_ADMIN_PASSWORD. +# +# ansible-playbook -i 127.0.0.1, mongo_rolling_upgrade.yml -e@/path/to/config-file.yml +# +# This play uses MONGO_RS_CONFIG to find a host to connect to and fetch replset config and build an +# inventory, so you can just target localhost. +# +# If there are no hidden secondaries, the 'Upgrade hidden members' task block will just skip. +# +# This will process a hidden secondary twice - first as a 'hidden' server, then as a 'secondary' but +# this is effectively a no-op except for apt checking the versions and then checking that mongo is running. +# It is valid to have other types of hidden machines, so this seemed better than skipping. +# +# If you wish to avoid updating the primary, you can add -e 'SKIP_PRIMARY=true' to your ansible +# invocation. 
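For reference, below is a minimal sketch of the config file this play expects. The variable names (MONGO_RS_CONFIG, MONGO_ADMIN_USER, MONGO_ADMIN_PASSWORD) come straight from the play, but the member layout is illustrative; the authoritative MONGO_RS_CONFIG format is the one documented in the mongo_3_0 role:

    # Illustrative only -- follow the mongo_3_0 role docs for the real schema.
    MONGO_ADMIN_USER: admin
    MONGO_ADMIN_PASSWORD: changeme
    MONGO_RS_CONFIG:
      members:
        - host: "203.0.113.11:27017"
        - host: "203.0.113.12:27017"
        - host: "203.0.113.13:27017"

Note that the play only dereferences the first members[].host entry to pick a seed node to query; the hidden/secondary/primary grouping is driven by the live replica set state returned by mongodb_rs_config, not by this file.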
+ +- name: Find hidden secondaries + hosts: 127.0.0.1 + connection: local + gather_facts: False + vars: + - SKIP_PRIMARY: False + tasks: + - name: Get configuration of mongo cluster + mongodb_rs_config: + host: "{{ (MONGO_RS_CONFIG.members|map(attribute='host')|list)[0] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: rs_config + - name: Build inventory of hidden members + add_host: + hostname: "{{ (item.host.split(':'))[0] }}" + instance_id: "{{ item._id }}" + groups: hidden_hosts + ansible_ssh_user: ubuntu + with_items: + - "{{ rs_config.hidden }}" + - name: Build inventory of secondary members + add_host: + hostname: "{{ (item.host.split(':'))[0] }}" + instance_id: "{{ item._id }}" + groups: secondary_hosts + ansible_ssh_user: ubuntu + with_items: + - "{{ rs_config.secondary }}" + - name: Build inventory of primary members + add_host: + hostname: "{{ (item.host.split(':'))[0] }}" + instance_id: "{{ item._id }}" + groups: primary_hosts + ansible_ssh_user: ubuntu + with_items: + - "{{ rs_config.primary }}" + when: not SKIP_PRIMARY + +- name: Upgrade hidden members + hosts: hidden_hosts + gather_facts: True + become: True + vars_files: + - ../roles/mongo_3_0/defaults/main.yml + tasks: + - name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + with_items: "{{ mongodb_debian_pkgs }}" + - name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + - name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + # This ensures that no servers are in a state other than PRIMARY or SECONDARY. https://docs.mongodb.com/manual/reference/replica-states/ + until: status.status is defined and not (['PRIMARY','SECONDARY'] | symmetric_difference(status.status.members|map(attribute='stateStr')|list|unique)) + retries: 5 + delay: 2 + +- name: Upgrade secondary members + hosts: secondary_hosts + gather_facts: True + become: True + serial: 1 + vars_files: + - ../roles/mongo_3_0/defaults/main.yml + tasks: + - name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + with_items: "{{ mongodb_debian_pkgs }}" + - name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + - name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + # This ensures that no servers are in a state other than PRIMARY or SECONDARY. 
https://docs.mongodb.com/manual/reference/replica-states/ + until: status.status is defined and not (['PRIMARY','SECONDARY'] | symmetric_difference(status.status.members|map(attribute='stateStr')|list|unique)) + retries: 5 + delay: 2 + +- name: Upgrade primary members + hosts: primary_hosts + gather_facts: True + become: True + vars_files: + - ../roles/mongo_3_0/defaults/main.yml + tasks: + - name: Step down (this can take up to a minute to complete while the primary waits on a secondary) + mongodb_step_down: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + - name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + with_items: "{{ mongodb_debian_pkgs }}" + - name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + - name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + # This ensures that no servers are in a state other than PRIMARY or SECONDARY. https://docs.mongodb.com/manual/reference/replica-states/ + until: status.status is defined and not (['PRIMARY','SECONDARY'] | symmetric_difference(status.status.members|map(attribute='stateStr')|list|unique)) + retries: 5 + delay: 2 diff --git a/playbooks/mongo_upgrade_hidden_secondaries.yml b/playbooks/mongo_upgrade_hidden_secondaries.yml new file mode 100644 index 00000000000..a27fa5204cf --- /dev/null +++ b/playbooks/mongo_upgrade_hidden_secondaries.yml @@ -0,0 +1,65 @@ +# Upgrades the hidden secondary in a mongo cluster if one exists +# +# This is useful for using the hidden secondary to sniff out any problems with +# your point upgrade before you upgrade your primary/secondary servers using the +# mongo_rolling_upgrade.yml play +# +# This play expects to have access to a config file where MONGO_RS_CONFIG, as described +# in the mongo_3_0 role, is defined, as well as MONGO_ADMIN_USER and MONGO_ADMIN_PASSWORD. +# +# ansible-playbook -i 127.0.0.1, mongo_upgrade_hidden_secondaries.yml -e@/path/to/config-file.yml +# +# This play uses MONGO_RS_CONFIG to find a host to connect to and fetch replset config and build an +# inventory, so you can just target localhost. +# +# If there are no hidden secondaries, the 'Upgrade hidden members' task will just skip. 
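Some context for the play below: a member only appears in rs_config.hidden if the replica set configuration marks it hidden, and MongoDB requires hidden members to carry priority 0. Assuming the mongo role passes per-member fields through to the replica set config (an assumption -- check the role you actually use), a hidden secondary might be declared like this:

    MONGO_RS_CONFIG:
      members:
        - host: "203.0.113.11:27017"
        - host: "203.0.113.12:27017"
        - host: "203.0.113.14:27017"   # hidden secondary used for canary upgrades
          hidden: true
          priority: 0                  # MongoDB requires priority 0 for hidden members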
+ +- name: Find hidden secondaries + hosts: 127.0.0.1 + connection: local + gather_facts: False + tasks: + - name: Get configuration of mongo cluster + mongodb_rs_config: + host: "{{ (MONGO_RS_CONFIG.members|map(attribute='host')|list)[0] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: rs_config + - name: Build inventory of hidden secondaries + add_host: + hostname: "{{ (item.host.split(':'))[0] }}" + instance_id: "{{ item._id }}" + groups: hidden_hosts + ansible_ssh_user: ubuntu + with_items: + - "{{ rs_config.hidden }}" + +- name: Upgrade hidden members + hosts: hidden_hosts + gather_facts: True + become: True + vars_files: + - ../roles/mongo_3_0/defaults/main.yml + tasks: + - name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + with_items: "{{ mongodb_debian_pkgs }}" + - name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + - name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + retries: 5 + delay: 2 + run_once: true diff --git a/playbooks/mysql.yml b/playbooks/mysql.yml new file mode 100644 index 00000000000..df3766a923f --- /dev/null +++ b/playbooks/mysql.yml @@ -0,0 +1,6 @@ +- name: Deploy MySQL + hosts: all + become: True + gather_facts: True + roles: + - mysql diff --git a/playbooks/neo4j.yml b/playbooks/neo4j.yml new file mode 100644 index 00000000000..d96a3182ad9 --- /dev/null +++ b/playbooks/neo4j.yml @@ -0,0 +1,19 @@ +- name: Deploy neo4j for coursegraph + hosts: all + become: True + gather_facts: True + vars: + CLUSTER_NAME: 'coursegraph' + roles: + - role: nginx + nginx_template_dir: "../../roles/neo4j/templates/edx/app/nginx/sites-available" + nginx_sites: + - coursegraph + nginx_default_sites: + - coursegraph +# - aws + - neo4j + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE \ No newline at end of file diff --git a/playbooks/newrelic_mongo_monitor.yml b/playbooks/newrelic_mongo_monitor.yml new file mode 100644 index 00000000000..2848528850d --- /dev/null +++ b/playbooks/newrelic_mongo_monitor.yml @@ -0,0 +1,7 @@ + +- name: Configure newrelic mongo monitoring + hosts: all + become: True + gather_facts: True + roles: + - mongo_newrelic_monitor diff --git a/playbooks/notes.yml b/playbooks/notes.yml new file mode 100644 index 00000000000..0f0d0149cb8 --- /dev/null +++ b/playbooks/notes.yml @@ -0,0 +1,26 @@ +- name: Deploy edX Notes API + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - edx_notes_api + - edx_notes_api + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'edx_notes_api' + when: EDX_NOTES_API_HERMES_ENABLED + diff --git a/playbooks/oauth_client_setup.yml b/playbooks/oauth_client_setup.yml new file mode 100644 index 00000000000..c2bb54ff1cf --- 
/dev/null +++ b/playbooks/oauth_client_setup.yml @@ -0,0 +1,16 @@ +- name: Configure OAuth2 clients + hosts: all + become: True + gather_facts: True + vars_files: + - "roles/common_vars/defaults/main.yml" + - "roles/edxapp/defaults/main.yml" + - "roles/insights/defaults/main.yml" + - "roles/ecommerce/defaults/main.yml" + - "roles/credentials/defaults/main.yml" + - "roles/discovery/defaults/main.yml" + - "roles/registrar/defaults/main.yml" + - "roles/designer/defaults/main.yml" + - "roles/enterprise_catalog/defaults/main.yml" + roles: + - oauth_client_setup diff --git a/playbooks/openedx_native.yml b/playbooks/openedx_native.yml new file mode 100644 index 00000000000..009f8edfb07 --- /dev/null +++ b/playbooks/openedx_native.yml @@ -0,0 +1,144 @@ +--- + +# Open edX Native installation for single server community installs. + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + migrate_db: "yes" + MFE_DEPLOY_NGINX_PORT: 19010 + MFE_BASE: "{{ EDXAPP_LMS_BASE }}:{{ MFE_DEPLOY_NGINX_PORT }}" + MFE_DEPLOY_COMMON_HOSTNAME: "{{ EDXAPP_LMS_BASE }}" + EDXAPP_PREVIEW_LMS_BASE: 'preview.{{ EDXAPP_LMS_BASE }}' + EDXAPP_LOGIN_REDIRECT_WHITELIST: [ + "{{ EDXAPP_CMS_BASE }}", + "{{ MFE_BASE }}", + ] + EDXAPP_CORS_ORIGIN_WHITELIST: [ + "{{ MFE_BASE }}", + ] + + EDXAPP_CSRF_TRUSTED_ORIGINS: [ + "{{ MFE_BASE }}", + ] + EDXAPP_ENABLE_CORS_HEADERS: true + EDXAPP_ENABLE_CROSS_DOMAIN_CSRF_COOKIE: true + EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: '{{ EDXAPP_LMS_BASE }}' + EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_NAME: 'native-csrf-cookie' + EDXAPP_LMS_BASE_SCHEME: http + COMMON_LMS_BASE_URL: "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ EDXAPP_LMS_BASE }}" + EDXAPP_ACCOUNT_MICROFRONTEND_URL: "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ MFE_BASE }}/account" + EDXAPP_LMS_NGINX_PORT: '80' + EDX_PLATFORM_VERSION: 'master' + EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL: "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ MFE_BASE }}/ecommerce/orders" + EDXAPP_SITE_CONFIGURATION: + - values: + ENABLE_ORDER_HISTORY_MICROFRONTEND: "{{ SANDBOX_ENABLE_ECOMMERCE }}" + + # Set to false if deployed behind another proxy/load balancer. + NGINX_SET_X_FORWARDED_HEADERS: True + DISCOVERY_URL_ROOT: 'http://localhost:{{ DISCOVERY_NGINX_PORT }}' + AWS_GATHER_FACTS: false + COMMON_ENABLE_AWS_ROLE: false + ecommerce_create_demo_data: true + credentials_create_demo_data: true + CONFIGURE_JWTS: true + SANDBOX_ENABLE_BLOCKSTORE: false + SANDBOX_ENABLE_DISCOVERY: true + SANDBOX_ENABLE_ECOMMERCE: true + SANDBOX_ENABLE_ANALYTICS_API: true + SANDBOX_ENABLE_INSIGHTS: true + SANDBOX_ENABLE_REDIS: true + SANDBOX_ENABLE_NOTES: false + SANDBOX_ENABLE_OPEN_SEARCH: TRUE + DEMO_ROLE_ENABLED: true + ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING: false + EDXAPP_ENABLE_MEMCACHE: true + EDXAPP_ENABLE_ELASTIC_SEARCH: true + # Ecommerce + ECOMMERCE_CORS_ORIGIN_WHITELIST: [ + "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ MFE_BASE }}", + ] + ECOMMERCE_CSRF_TRUSTED_ORIGINS: [ + "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ MFE_BASE }}", + ] + ECOMMERCE_CORS_ALLOW_CREDENTIALS: true + # For the mfe role. 
+ COMMON_ECOMMERCE_BASE_URL: '{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}' + ECOMMERCE_ENABLE_PAYMENT_MFE: true + + EDXAPP_ENABLE_CLOUDWATCH: false + cloudwatch_logs_enabled: '{{ EDXAPP_ENABLE_CLOUDWATCH }}' + roles: + - role: swapfile + SWAPFILE_SIZE: 4GB + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - cms + - lms + - forum + - xqueue + nginx_default_sites: + - lms + - role: edxlocal + when: EDXAPP_MYSQL_HOST == 'localhost' + - role: memcache + when: EDXAPP_ENABLE_MEMCACHE + - role: mongo_4_0 + when: "'localhost' in EDXAPP_MONGO_HOSTS" + - role: redis + when: SANDBOX_ENABLE_REDIS + - role: edxapp + celery_worker: True + - edxapp + - role: blockstore + when: SANDBOX_ENABLE_BLOCKSTORE + - role: ecommerce + when: SANDBOX_ENABLE_ECOMMERCE + - role: ecomworker + ECOMMERCE_WORKER_BROKER_HOST: 127.0.0.1 + when: SANDBOX_ENABLE_ECOMMERCE + - role: analytics_api + when: SANDBOX_ENABLE_ANALYTICS_API + - role: insights + when: SANDBOX_ENABLE_INSIGHTS + - role: edx_notes_api + when: SANDBOX_ENABLE_NOTES + # Run the nginx role to install edx_notes_api config since the app role + # currently doesn't do that. + - role: nginx + nginx_sites: + - edx_notes_api + when: SANDBOX_ENABLE_NOTES + - role: demo + when: DEMO_ROLE_ENABLED + - oauth_client_setup + - role: elasticsearch + when: EDXAPP_ENABLE_ELASTIC_SEARCH + - role: opensearch + when: SANDBOX_ENABLE_OPEN_SEARCH + - forum + - role: discovery + when: SANDBOX_ENABLE_DISCOVERY + - role: xqueue + update_users: True + - edx_ansible + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: aws_cloudwatch_agent + when: EDXAPP_ENABLE_CLOUDWATCH + - role: postfix_queue + when: POSTFIX_QUEUE_EXTERNAL_SMTP_HOST != '' + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: user_retirement_pipeline + when: COMMON_RETIREMENT_SERVICE_SETUP + - role: mfe_deployer + MFE_DEPLOY_ECOMMERCE_MFES: "{{ SANDBOX_ENABLE_ECOMMERCE }}" + - role: mfe_flags_setup diff --git a/playbooks/opensearch.yml b/playbooks/opensearch.yml new file mode 100644 index 00000000000..c499d0b0958 --- /dev/null +++ b/playbooks/opensearch.yml @@ -0,0 +1,45 @@ +- hosts: all + become: True + vars: + # By default take instances in and out of the elb(s) they + # are attached to + # To skip elb operations use "-e elb_pre_post=false" + elb_pre_post: true + # Number of instances to operate on at a time + serial_count: 1 + CLUSTER_NAME: "commoncluster" + serial: "{{ serial_count }}" + pre_tasks: + - action: ec2_metadata_facts + when: elb_pre_post + - debug: + var: ansible_ec2_instance_id + when: elb_pre_post + - name: Instance De-register + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + region: us-east-1 + state: absent + wait_timeout: 60 + become: False + when: elb_pre_post + roles: + - common + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - opensearch + post_tasks: + - debug: + var: ansible_ec2_instance_id + when: elb_pre_post + - name: Register instance in the elb + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ ec2_elbs }}" + region: us-east-1 + state: present + wait_timeout: 60 + become: False + when: elb_pre_post diff --git a/playbooks/ora2.yml b/playbooks/ora2.yml new file mode 100644 index 00000000000..4fd52106a8a --- /dev/null +++ b/playbooks/ora2.yml @@ -0,0 +1,50 @@ +# Deploy a specific version of edx-ora2 and re-run migrations +# edx-ora2 is already included in the requirements for edx-platform, +# but we need to override 
that version when deploying to +# the continuous integration server for testing ora2 changes. + +- name: Update edx-ora2 + hosts: all + become: True + gather_facts: True + vars: + - env_path: /edx/app/edxapp/edxapp_env + - edxapp_venv_dir: "/edx/app/edxapp/venvs/edxapp" + - edxapp_code_dir: "/edx/app/edxapp/edx-platform" + - edxapp_deploy_path: "{{ edxapp_venv_dir }}/bin:{{ edxapp_code_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + - edxapp_user: "edxapp" + - edxapp_mysql_user: "migrate" + - edxapp_mysql_password: "password" + - supervisorctl_path: "/edx/bin/supervisorctl" + - ora2_version: "master" + - ora2_pip_req: "git+https://github.com/openedx/edx-ora2.git@{{ ora2_version }}#egg=edx-ora2" + + tasks: + - name: install edx-ora2 + shell: > + {{ edxapp_venv_dir }}/bin/pip install -e {{ ora2_pip_req }} + chdir={{ edxapp_code_dir }} + environment: + PATH: "{{ edxapp_deploy_path }}" + become_user: "{{ edxapp_user }}" + tags: + - deploy + + - name: migrate + shell: > + . {{env_path}} && {{ edxapp_venv_dir }}/bin/python manage.py lms migrate --settings=production --noinput + chdir={{ edxapp_code_dir }} + environment: + DB_MIGRATION_USER: "{{ edxapp_mysql_user }}" + DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}" + tags: + - deploy + + - name: restart lms + shell: "{{ supervisorctl_path }} restart lms" + + - name: restart studio + shell: "{{ supervisorctl_path }} restart cms" + + - name: restart workers + shell: "{{ supervisorctl_path }} restart edxapp_worker:" diff --git a/playbooks/ora_grading.yml b/playbooks/ora_grading.yml new file mode 100644 index 00000000000..8d63e0c6e65 --- /dev/null +++ b/playbooks/ora_grading.yml @@ -0,0 +1,17 @@ +- name: Deploy the ORA Grading MFE Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'ora-grading' + ORA_GRADING_ENABLED: True + ORA_GRADING_SANDBOX_BUILD: False + roles: + - role: mfe + MFE_NAME: ora-grading + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + diff --git a/playbooks/payment.yml b/playbooks/payment.yml new file mode 100644 index 00000000000..1dc60f5a5e8 --- /dev/null +++ b/playbooks/payment.yml @@ -0,0 +1,16 @@ +- name: Deploy payment MFE Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'payment' + PAYMENT_MFE_ENABLED: True + PAYMENT_MFE_SANDBOX_BUILD: False + roles: + - role: payment + MFE_NAME: payment + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/populate_configuration_model.yml b/playbooks/populate_configuration_model.yml new file mode 100644 index 00000000000..a76f224fb5b --- /dev/null +++ b/playbooks/populate_configuration_model.yml @@ -0,0 +1,59 @@ +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Usage: ansible-playbook -i lms-host-1, ./populate_configuration_model.yml -e "file=/path/to/json/file" -e "user=username" +# +# Overview: +# This executes the "populate_model" management command to populate a ConfigurationModel with +# data from the supplied JSON file. 
+# +# The username of an existing user must be specified to indicate who is performing the operation. +# +# JSON file format should be: +# +# { "model": "config_models.ExampleConfigurationModel", +# "data": +# [ +# { "enabled": true, +# "color": "black" +# ... +# }, +# { "enabled": false, +# "color": "yellow" +# ... +# }, +# ... +# ] +# } +# + +- hosts: all + vars: + python_path: /edx/app/edxapp/venvs/edxapp/bin/python + manage_path: /edx/bin/manage.edxapp + lms_env: /edx/app/edxapp/edxapp_env + become_user: www-data + become: true + tasks: + - name: Create a temp directory + shell: mktemp -d /tmp/ansible_xblock_config.XXXXX + register: xblock_config_temp_directory + - name: Copy config file to remote server + copy: + src: "{{ file }}" + dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}" + register: xblock_config_file + - name: Manage xblock configurations + shell: ". {{lms_env}} && {{ python_path }} {{ manage_path }} lms --settings=production populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}" + register: command_result + changed_when: "'Import complete, 0 new entries created' not in command_result.stdout" + - debug: msg="{{ command_result.stdout }}" + - name: Clean up tempdir + file: + path: "{{ xblock_config_temp_directory.stdout }}" + state: absent diff --git a/playbooks/profile.yml b/playbooks/profile.yml new file mode 100644 index 00000000000..2698d9820d3 --- /dev/null +++ b/playbooks/profile.yml @@ -0,0 +1,17 @@ +- name: Deploy profile Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'profile' + PROFILE_MFE_ENABLED: True + PROFILE_MFE_SANDBOX_BUILD: False + roles: + - role: mfe + MFE_NAME: profile + MFE_VERSION: '{{ PROFILE_MFE_VERSION }}' + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/program_console.yml b/playbooks/program_console.yml new file mode 100644 index 00000000000..1c4f2ab4ca6 --- /dev/null +++ b/playbooks/program_console.yml @@ -0,0 +1,21 @@ +- name: Deploy Program-console Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'program-console' + PROGRAM_CONSOLE_ENABLED: True + PROGRAM_CONSOLE_SANDBOX_BUILD: False + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - program_console + PROGRAM_CONSOLE_NGINX_PORT: 8976 + - program_console + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/promote_rds_secondary.yml b/playbooks/promote_rds_secondary.yml new file mode 100644 index 00000000000..0e0e588462d --- /dev/null +++ b/playbooks/promote_rds_secondary.yml @@ -0,0 +1,63 @@ +# Step 2 of migrating to the MySQL separate-database StudentModuleHistory backend +# Step 1 is in create_edxapp_history_db.yml +# +# Usage: AWS_PROFILE=myprofile ansible-playbook promote_rds_secondary.yml -i localhost, -e 'rds_name=env-dep-csm admin_password=SUPERSECRET' + +#NB: should this do tags? 
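As a sanity check after the promotion below completes, you can ask RDS whether the instance is still attached to a replication source; ReadReplicaSourceDBInstanceIdentifier is empty on a standalone instance. The task below is an editorial sketch, not part of the original play, and assumes the same awscli credentials the play already relies on:

    - name: Confirm instance is no longer a read replica (illustrative check)
      shell: >
        aws rds describe-db-instances
        --db-instance-identifier {{ rds_name }}
        --query 'DBInstances[0].ReadReplicaSourceDBInstanceIdentifier'
        --output text
      register: replica_source
      changed_when: false
      # With --output text, a null field prints "None"
      failed_when: replica_source.stdout not in ['None', '']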
+ +- name: Promote new edxapp history RDS instance + hosts: all + connection: local + gather_facts: false + vars: + rds_name: + region: us-east-1 + admin_password: + backup_retention_days: 30 + backup_window: 02:00-03:00 + maint_window: Mon:00:00-Mon:01:15 + + tasks: + - name: Validate arguments + fail: + msg: "One or more arguments were not set correctly: {{ item }}" + when: not item + with_items: + - rds_name + - admin_password + + - name: Validate awscli version >= 1.9.9 + shell: | + version=$(aws --version 2>&1 | sed -r 's|.*aws-cli/([0-9]+\.[0-9]+\.[0-9]+).*|\1|') + if [ $version != '1.9.9' ]; then + cmp=$(echo -e "$version\n1.9.9" | sort -rV | head -n1) + [ $cmp = "1.9.9" ] && exit 1 || exit 0 + fi + changed_when: False + + - name: Promote edxapp history RDS to primary instance + #Use local module for promoting only because of this issue: + # + rds_local: + command: promote + instance_name: "{{ rds_name }}" + region: "{{ region }}" + wait: yes + wait_timeout: 900 + + #Can't use the module if you want to be able to set storage types until this PR lands: + # + #The StorageType option isn't in boto, but it is in boto3 + #Requires awscli>=1.9.9 + - name: Modify edxapp history RDS + shell: > + aws rds modify-db-instance + --db-instance-identifier {{ rds_name }} + --apply-immediately + --multi-az + --master-user-password {{ admin_password }} + --publicly-accessible + --backup-retention-period {{ backup_retention_days }} + --preferred-backup-window {{ backup_window }} + --preferred-maintenance-window {{ maint_window }} + --storage-type gp2 diff --git a/playbooks/prospectus.yml b/playbooks/prospectus.yml new file mode 100644 index 00000000000..98cbcb2c1d2 --- /dev/null +++ b/playbooks/prospectus.yml @@ -0,0 +1,36 @@ +- name: Deploy edX Prospectus Service + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'prospectus' + PROSPECTUS_DATA_DIR: "/edx/var/prospectus" + NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: True + NGINX_MAP_HASH_MAX_SIZE: 4096 + NGINX_MAP_HASH_BUCKET_SIZE: 128 + PROSPECTUS_ENABLED: True + PROSPECTUS_SANDBOX_BUILD: False + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE|bool and PROSPECTUS_ENABLE_PRE_BUILD|bool + - role: automated + AUTOMATED_USERS: "{{ PROSPECTUS_AUTOMATED_USERS | default({}) }}" + when: PROSPECTUS_ENABLE_PRE_BUILD|bool + - role: prospectus + when: PROSPECTUS_ENABLE_BUILD|bool + - role: nginx + when: PROSPECTUS_ENABLE_NGINX|bool + nginx_app_dir: "/etc/nginx" + nginx_sites: + - prospectus + nginx_default_sites: + - prospectus + PROSPECTUS_NGINX_PORT: 8000 + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER|bool and PROSPECTUS_ENABLE_POST_BUILD|bool + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE|bool and PROSPECTUS_ENABLE_POST_BUILD|bool + - role: datadog + when: COMMON_ENABLE_DATADOG|bool and PROSPECTUS_ENABLE_POST_BUILD|bool diff --git a/playbooks/prospectus_sandbox.yml b/playbooks/prospectus_sandbox.yml new file mode 100644 index 00000000000..a620cd37355 --- /dev/null +++ b/playbooks/prospectus_sandbox.yml @@ -0,0 +1,23 @@ +# Restarts supervisor and nginx tasks for a sandbox + +- name: restart supervisor/nginx for a sandbox + hosts: all + become: True + gather_facts: True + + tasks: + - name: stop supervisor + shell: > + sudo service supervisor stop + + - name: stop nginx + shell: > + sudo service nginx stop + + - name: kill www-data tasks + shell: > + sudo pkill -u www-data + + - name: restart nginx + shell: > + sudo service nginx start \ No
newline at end of file diff --git a/playbooks/rabbitmq.yml b/playbooks/rabbitmq.yml new file mode 100644 index 00000000000..0e166a9bf00 --- /dev/null +++ b/playbooks/rabbitmq.yml @@ -0,0 +1,57 @@ +- name: Deploy rabbitmq + hosts: all + become: True + # The rabbitmq role depends on + # ansible_default_ipv4 so + # gather_facts must be set to True + gather_facts: True + vars: + # By default take instances in and out of the elb(s) they + # are attached to + # To skip elb operations use "-e elb_pre_post=false" + elb_pre_post: true + # Number of instances to operate on at a time + serial_count: 1 + CLUSTER_NAME: 'rabbitmq' + serial: "{{ serial_count }}" + pre_tasks: + - action: ec2_metadata_facts + when: elb_pre_post + - debug: + var: ansible_ec2_instance_id + when: elb_pre_post + - name: Instance De-register + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + region: us-east-1 + state: absent + wait_timeout: 60 + become: False + when: elb_pre_post + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: automated + AUTOMATED_USERS: "{{ RABBIT_AUTOMATED_USERS | default({}) }}" + tags: + - automated_role + - rabbitmq + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + post_tasks: + - debug: + var: ansible_ec2_instance_id + when: elb_pre_post + - name: Register instance in the elb + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ ec2_elbs }}" + region: us-east-1 + state: present + wait_timeout: 60 + become: False + when: elb_pre_post diff --git a/playbooks/redirector.yml b/playbooks/redirector.yml new file mode 100644 index 00000000000..0b2128f9118 --- /dev/null +++ b/playbooks/redirector.yml @@ -0,0 +1,25 @@ +- name: Deploy redirector host + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + CLUSTER_NAME: 'redirector' + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_redirects: "{{ NGINX_REDIRECTOR_CUSTOM_REDIRECTS }}" + REDIRECT_NGINX_PORT: "80" + REDIRECT_SSL_NGINX_PORT: "443" + NGINX_ENABLE_SSL: "{{ REDIRECT_NGINX_ENABLE_SSL }}" + NGINX_SSL_CERTIFICATE: "{{ REDIRECT_NGINX_SSL_CERTIFICATE }}" + NGINX_SSL_KEY: "{{ REDIRECT_NGINX_SSL_KEY }}" + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/redis.yml b/playbooks/redis.yml new file mode 100644 index 00000000000..7565d85e285 --- /dev/null +++ b/playbooks/redis.yml @@ -0,0 +1,17 @@ +- name: Deploy redis + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - redis + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/registrar.yml b/playbooks/registrar.yml new file mode 100644 index 00000000000..018baceae81 --- /dev/null +++ b/playbooks/registrar.yml @@ -0,0 +1,22 @@ +- name: Deploy edX Registrar + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'registrar' + REGISTRAR_ENABLED: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - registrar + - registrar + - role: splunkforwarder + when: 
COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'registrar' + when: REGISTRAR_HERMES_ENABLED diff --git a/playbooks/remove-ubuntu-key.yml b/playbooks/remove-ubuntu-key.yml new file mode 100644 index 00000000000..0c1f0fb0147 --- /dev/null +++ b/playbooks/remove-ubuntu-key.yml @@ -0,0 +1,38 @@ +# A simple utility play to remove a public key from the authorized key +# file for the ubuntu user +# You must pass in the entire line that you are removing +- hosts: all + vars: + # Number of instances to operate on at a time + serial_count: 1 + owner: ubuntu + keyfile: "/home/{{ owner }}/.ssh/authorized_keys" + serial: "{{ serial_count }}" + tasks: + - fail: msg="You must pass in a public_key" + when: public_key is not defined + - fail: msg="public_key does not exist in secrets" + when: ubuntu_public_keys[public_key] is not defined + - command: mktemp + register: mktemp + # This command will fail if this returns zero lines which will prevent + # the last key from being removed + - shell: "grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}" + - shell: "while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}" + args: + executable: /bin/bash + register: keycheck + - fail: msg="public key check failed!" + when: keycheck.stderr != "" + - command: cp {{ mktemp.stdout }} {{ keyfile }} + - file: + path: "{{ keyfile }}" + owner: "{{ owner }}" + mode: 0600 + - file: + path: "{{ mktemp.stdout }}" + state: absent + - shell: wc -l < {{ keyfile }} + register: line_count + - fail: msg="There should only be one line in ubuntu's authorized_keys" + when: line_count.stdout|int != 1 diff --git a/playbooks/restart_supervisor.yml b/playbooks/restart_supervisor.yml new file mode 100644 index 00000000000..6724da9f9e6 --- /dev/null +++ b/playbooks/restart_supervisor.yml @@ -0,0 +1,15 @@ +- name: restarts supervisor + hosts: all + become: True + gather_facts: False + vars_files: + - roles/common_vars/defaults/main.yml + - roles/supervisor/defaults/main.yml + tasks: + - name: supervisor | restart supervisor + service: + name: "{{ supervisor_service }}" + state: restarted + register: rc + until: rc is succeeded + retries: 5 diff --git a/playbooks/retire_host.yml b/playbooks/retire_host.yml new file mode 100644 index 00000000000..fc7a4541780 --- /dev/null +++ b/playbooks/retire_host.yml @@ -0,0 +1,55 @@ +# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml +# -e@/vars/env.yml --limit Terminating_Wait -e TARGET="Terminating_Wait" +# +# Note that the target now must be specified as an argument +# + +# +# This is separate because its use of handlers +# leads to various race conditions. +# + +- name: Stop all services + hosts: "{{TARGET}}" + become: True + gather_facts: True + roles: + - stop_all_edx_services + +- name: Server retirement workflow + hosts: "{{TARGET}}" + become: True + gather_facts: False + tasks: + - name: Terminate existing object store log sync + shell: /usr/bin/pkill send-logs-to-object-store || true + - name: "Ensure send-logs-to-object-store script is in the logrotate file" + shell: grep send-logs-to-object-store /etc/logrotate.d/hourly/tracking.log + # We only force a rotation of edx logs. + # Forced rotation of system logfiles will only + # work if there hasn't already been a previous rotation + # The logrotate will also call send-logs-to-s3 but hasn't + # been updated for all servers yet. 
+    - name: Force a log rotation which will call the log sync
+      command: /usr/sbin/logrotate -f /etc/logrotate.d/hourly/{{ item }}
+      with_items:
+        - "tracking.log"
+        - "edx-services"
+    # This catches the case where tracking.log is 0 bytes
+    - name: Sync again
+      command: /edx/bin/send-logs-to-object-store -d "{{ COMMON_LOG_DIR }}/tracking/" -b "{{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }}/logs/tracking"
+    - name: Sync edX services logs again
+      command: "/edx/bin/send-logs-to-object-store -d {{ COMMON_LOG_DIR }}{{ item.src }} -b {{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }} -p {{ item.dest }}"
+      with_items:
+        - { src: '/lms/', dest: '{{ COMMON_OBJECT_STORE_EDX_LOG_SYNC_PREFIX | default("logs/edx/") }}lms/' }
+        - { src: '/cms/', dest: '{{ COMMON_OBJECT_STORE_EDX_LOG_SYNC_PREFIX | default("logs/edx/") }}cms/' }
+      when: (COMMON_OBJECT_STORE_EDX_LOG_SYNC is defined) and COMMON_OBJECT_STORE_EDX_LOG_SYNC == true
+
+- name: Run minos verification
+  hosts: "{{TARGET}}"
+  become: True
+  gather_facts: False
+  tasks:
+    - name: Run minos
+      command: /edx/app/minos/venvs/bin/minos --config /edx/etc/minos.yml --json
+      ignore_errors: yes
diff --git a/playbooks/roles/ad_hoc_reporting/defaults/main.yml b/playbooks/roles/ad_hoc_reporting/defaults/main.yml
new file mode 100644
index 00000000000..0df98ea5b5d
--- /dev/null
+++ b/playbooks/roles/ad_hoc_reporting/defaults/main.yml
@@ -0,0 +1,62 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+#
+# Defaults for role ad_hoc_reporting
+#
+
+# These users are given access
+# to the databases from the ad hoc reporting environment;
+# it needs to be a subset of the
+# users created on the box, which is
+# COMMON_USER_INFO + AD_HOC_REPORTING_USER_INFO
+
+# These users are created on the ad_hoc_reporting environment
+AD_HOC_REPORTING_USER_INFO: []
+#
+# vars are namespaced with the module name.
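+# An illustrative entry for the AD_HOC_REPORTING_USER_INFO list defined
+# above, assuming the same shape the "user" role consumes elsewhere in this
+# repo; the name is a placeholder:
+#
+# AD_HOC_REPORTING_USER_INFO:
+#   - name: jdoe
+#     github: true
+#     type: admin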
+# +ad_hoc_reporting_role_name: ad_hoc_reporting + +# +# OS packages +# + +ad_hoc_reporting_debian_pkgs: + # for running ansible mysql module + - mysql-client-core-8.0 + # includes mysqldump and others + - mysql-client + - libmysqlclient-dev + # mongo client is installed as a separate step so it comes from the 10gen repo + +ad_hoc_reporting_pip_pkgs: + # for running ansible mysql + - PyMySQL + +MONGODB_APT_KEY: "7F0CEB10" +MONGODB_APT_KEYSERVER: "keyserver.ubuntu.com" +MONGO_VERSION_MAJOR_MINOR: "4.2" +MONGO_VERSION_PATCH: "14" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +# AD_HOC_REPORTING_REPLICA_DB_HOSTS: +# - db_host: "{{ EDXAPP_MYSQL_REPLICA_HOST }}" +# db_name: "{{ EDXAPP_MYSQL_DB_NAME }}" +# script_name: edxapp-mysql.sh +# #depends on no other vars +# depends_on: True + +AD_HOC_REPORTING_REPLICA_DB_HOSTS: [] + +AWS_RDS_IAM_AUTHENTICATION: false + +aws_rds_ca_cert_key_url: "https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem" diff --git a/playbooks/roles/ad_hoc_reporting/meta/main.yml b/playbooks/roles/ad_hoc_reporting/meta/main.yml new file mode 100644 index 00000000000..c54dfb87cce --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/meta/main.yml @@ -0,0 +1,22 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role bastion +# +dependencies: + - common + - role: user + user_info: "{{ AD_HOC_REPORTING_USER_INFO }}" + tags: + - users + - role: user + user_info: "{{ COMMON_USER_INFO }}" + tags: + - users diff --git a/playbooks/roles/ad_hoc_reporting/tasks/main.yml b/playbooks/roles/ad_hoc_reporting/tasks/main.yml new file mode 100644 index 00000000000..7b679fc8a3d --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/tasks/main.yml @@ -0,0 +1,191 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Creates scripts and users to enable ad-hoc reporting using MySQL +# read replicas and MongoDB secondary nodes. +# +# Overview: +# Creates users and scripts for ad-hoc reporting environments from your +# ansible var files. 
You would run this role as follows
+#
+# ansible-playbook -i 'reporting.example.com,' ./ad_hoc_reporting.yml -e@/var/path/common_vars.yml -e@/vars/path/environment-deployment.yml
+#
+# Dependencies:
+#   - aws
+#   - user
+
+- name: install system packages
+  apt:
+    name: "{{ ad_hoc_reporting_debian_pkgs }}"
+    state: present
+  tags:
+    - install:system-requirements
+
+- name: add the mongodb signing key
+  apt_key:
+    url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc"
+    state: present
+  retries: 3
+  register: add_mongo_signing_key
+  tags:
+    - install:system-requirements
+  until: add_mongo_signing_key is succeeded
+
+- name: add the mongodb repo to the sources list
+  apt_repository:
+    repo: "{{ MONGODB_REPO }}"
+    state: present
+  tags:
+    - install:system-requirements
+
+- name: install mongo shell
+  apt:
+    name: mongodb-org-shell={{ MONGO_VERSION }}
+    state: present
+    install_recommends: yes
+    force: yes
+    update_cache: yes
+  tags:
+    - install:system-requirements
+
+- name: install mongo tools
+  apt:
+    name: mongodb-org-tools={{ MONGO_VERSION }}
+    state: present
+    install_recommends: yes
+    force: yes
+    update_cache: yes
+  tags:
+    - install:system-requirements
+
+- name: install python packages
+  pip:
+    name: "{{ ad_hoc_reporting_pip_pkgs }}"
+    state: present
+    extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
+  tags:
+    - install:app-requirements
+
+- name: create directories
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+  with_items:
+    - /edx/bin
+  tags:
+    - scripts
+    - install:base
+
+- name: Get the AWS RDS CA certificate to connect to the db using SSL
+  shell: "curl {{ aws_rds_ca_cert_key_url }} -o rds-combined-ca-bundle.pem"
+  args:
+    chdir: /edx/bin
+  when: AWS_RDS_IAM_AUTHENTICATION
+  tags:
+    - scripts
+    - install:base
+
+# These templates rely on there being a global
+# read_only mysql user; you must override the default
+# in order for these templates to be written out.
+# Also, all of the *_REPLICA_DB_HOST vars are only defined
+# in secure config files.
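+# A minimal sketch of such a secure overrides file, enabling the mysql
+# wrapper tasks below; every value shown is a placeholder, and the entry
+# shape follows the commented example in this role's defaults:
+#
+#   COMMON_MYSQL_READ_ONLY_USER: read_only
+#   COMMON_MYSQL_READ_ONLY_PASS: CHANGEME
+#   AD_HOC_REPORTING_REPLICA_DB_HOSTS:
+#     - db_host: replica.db.example.com
+#       db_name: edxapp
+#       script_name: edxapp-mysql.sh
+#       depends_on: true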
+- name: install common mysql replica scripts + template: + src: edx/bin/mysql.sh.j2 + dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} + mode: 0755 + owner: root + group: root + when: COMMON_MYSQL_READ_ONLY_PASS is defined and item.depends_on and not AWS_RDS_IAM_AUTHENTICATION + tags: + - scripts + - scripts:mysql + - install:code + with_items: "{{ AD_HOC_REPORTING_REPLICA_DB_HOSTS }}" + +- name: install common rds iam replica scripts + template: + src: edx/bin/rds-iam.sh.j2 + dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} + mode: 0755 + owner: root + group: root + when: item.db_host|length>0 and AWS_RDS_IAM_AUTHENTICATION + tags: + - scripts + - scripts:mysql + - install:code + with_items: "{{ AD_HOC_REPORTING_REPLICA_DB_HOSTS }}" + +# These templates rely on there being a global +# read_only mongo user, you must override the default +# in order for these templates to be written out +- name: install read_only user mongodb replica scripts + template: + src: edx/bin/mongo.sh.j2 + dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} + mode: 0755 + owner: root + group: root + with_items: + - db_hosts: "{{ EDXAPP_MONGO_HOSTS }}" + db_name: "{{ EDXAPP_MONGO_DB_NAME }}" + db_port: "{{ EDXAPP_MONGO_PORT }}" + script_name: edxapp-mongo.sh + read_only_access: "{{ EDXAPP_MONGO_READ_ONLY_ACCESS }}" + - db_hosts: "{{ FORUM_MONGO_HOSTS_FOR_AD_HOC_REPORTING }}" + db_name: "{{ FORUM_MONGO_DATABASE }}" + db_port: "{{ FORUM_MONGO_PORT }}" + script_name: forum-mongo.sh + read_only_access: "{{ FORUM_MONGO_READ_ONLY_ACCESS }}" + when: COMMON_MONGO_READ_ONLY_PASS is defined and item.read_only_access + tags: + - scripts + - scripts:mongo + - install:code + +- name: install single user access mongodb replica scripts + template: + src: edx/bin/mongo-user-auth.sh.j2 + dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} + mode: 0755 + owner: root + group: root + with_items: + - db_hosts: "{{ EDXAPP_MONGO_HOSTS }}" + db_name: "{{ EDXAPP_MONGO_DB_NAME }}" + db_port: "{{ EDXAPP_MONGO_PORT }}" + script_name: edxapp-mongo-user-auth.sh + read_only_access: "{{ EDXAPP_MONGO_READ_ONLY_ACCESS }}" + - db_hosts: "{{ FORUM_MONGO_HOSTS_FOR_AD_HOC_REPORTING }}" + db_name: "{{ FORUM_MONGO_DATABASE }}" + db_port: "{{ FORUM_MONGO_PORT }}" + script_name: forum-mongo-user-auth.sh + read_only_access: "{{ FORUM_MONGO_READ_ONLY_ACCESS }}" + when: not item.read_only_access + tags: + - scripts + - scripts:mongo + - install:code + +- name: install a global mongorc.js + template: + src: etc/mongorc.js.j2 + dest: /etc/mongorc.js + mode: 0755 + owner: root + group: root + tags: + - scripts + - scripts:mongo + - mongorc diff --git a/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo-user-auth.sh.j2 b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo-user-auth.sh.j2 new file mode 100644 index 00000000000..4f7850a00d3 --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo-user-auth.sh.j2 @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +db_hosts={{ item.db_hosts }} + +username=$1 +echo "Input your password: " +read -s password + +if [[ -z $username ]]; then + echo "Username required! 
Rerun using the following format to connect: /edx/bin/this-script.sh USERNAME"
+  exit 1
+fi
+
+for host in ${db_hosts//,/ }; do
+  is_secondary=$(mongo ${host}:{{ item.db_port }}/{{ item.db_name }} -u ${username} -p"${password}" --quiet --eval "printjson(db.isMaster()['secondary'])")
+  is_hidden=$(mongo ${host}:{{ item.db_port }}/{{ item.db_name }} -u ${username} -p"${password}" --quiet --eval "printjson(db.isMaster()['hidden'])")
+  if [[ $is_hidden == "true" ]]; then
+    replica=$host
+    # Found a hidden secondary; no need to keep looking.
+    break
+  fi
+
+  if [[ $is_secondary == "true" ]]; then
+    replica=$host
+    # Found a secondary, but there could be a hidden secondary;
+    # keep looking.
+  fi
+done
+
+if [[ -z $replica ]]; then
+  echo "No replica found for $db_hosts!"
+  exit 1
+fi
+
+mongo ${replica}:{{ item.db_port }}/{{ item.db_name }} -u ${username} -p"${password}"
diff --git a/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo.sh.j2 b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo.sh.j2
new file mode 100644
index 00000000000..65e1d33f947
--- /dev/null
+++ b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo.sh.j2
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+db_hosts={{ item.db_hosts }}
+from_port={{ item.db_port }}
+
+for host in ${db_hosts//,/ }; do
+  is_secondary=$(mongo ${host}:{{ item.db_port }}/{{ item.db_name }} -u {{ COMMON_MONGO_READ_ONLY_USER }} -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" --quiet --eval "printjson(db.isMaster()['secondary'])")
+  is_hidden=$(mongo ${host}:{{ item.db_port }}/{{ item.db_name }} -u {{ COMMON_MONGO_READ_ONLY_USER }} -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" --quiet --eval "printjson(db.isMaster()['hidden'])")
+  if [[ $is_hidden == "true" ]]; then
+    replica=$host
+    # Found a hidden secondary; no need to keep looking.
+    break
+  fi
+
+  if [[ $is_secondary == "true" ]]; then
+    replica=$host
+    # Found a secondary, but there could be a hidden secondary;
+    # keep looking.
+  fi
+done
+
+if [[ -z $replica ]]; then
+  echo "No replica found for $db_hosts!"
+ exit 1 +fi + +mongo ${replica}:{{ item.db_port }}/{{ item.db_name }} -u {{ COMMON_MONGO_READ_ONLY_USER }} -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" diff --git a/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mysql.sh.j2 b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mysql.sh.j2 new file mode 100644 index 00000000000..6d5b4326f2d --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mysql.sh.j2 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +mysql -u {{ COMMON_MYSQL_READ_ONLY_USER }} -h {{ item.db_host }} -p"{{ COMMON_MYSQL_READ_ONLY_PASS }}" {{ item.db_name }} diff --git a/playbooks/roles/ad_hoc_reporting/templates/edx/bin/rds-iam.sh.j2 b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/rds-iam.sh.j2 new file mode 100644 index 00000000000..d711216aa21 --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/rds-iam.sh.j2 @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SECURITY_TOKEN AWS_SESSION_TOKEN + +{% if COMMON_DEPLOYMENT == 'edge' %} +ROLE_ARN={{ RDS_IAM_AUTHENTICATION_ROLE_ARN }} +RESULT=(`aws sts assume-role --role-arn $ROLE_ARN \ + --role-session-name rds_read_only_iam \ + --query '[Credentials.AccessKeyId,Credentials.SecretAccessKey,Credentials.SessionToken]' \ + --output text`) +export AWS_ACCESS_KEY_ID=${RESULT[0]} +export AWS_SECRET_ACCESS_KEY=${RESULT[1]} +export AWS_SECURITY_TOKEN=${RESULT[2]} +export AWS_SESSION_TOKEN=${AWS_SECURITY_TOKEN} +{% endif %} + +DB_HOST="{{ item.db_host }}" + +# Token won't work if it's for a CNAME, so we need to check that we have the right hostname +TOKEN_DB_HOST="${DB_HOST}" + +if echo "${TOKEN_DB_HOST}" | grep -qv '.*rds.amazonaws.com'; then + # Sed is needed to trim trailing dot which makes the token not work + TOKEN_DB_HOST=$(dig +noall +short ${TOKEN_DB_HOST} | head -n 1 | sed 's/\.$//') +fi + +logger "Generating token for ${TOKEN_DB_HOST}" + +TOKEN="$(aws rds generate-db-auth-token --hostname ${TOKEN_DB_HOST} --port 3306 --region us-east-1 --username {{ COMMON_MYSQL_READ_ONLY_USER }})" + +if [[ "$1" == "generate-db-auth-token" ]]; then + echo "Third party application access credentials" + echo "----" + echo "Hostname: ${DB_HOST}" + echo "Port: 3306" + echo "Username: read_only_iam" + echo "Password: $TOKEN" + echo " " + echo "see https://openedx.atlassian.net/wiki/spaces/EdxOps/pages/26182437/How+to+Access+a+Read+Replica for documentation" +elif [[ -z "$1" ]]; then + mysql -u {{ COMMON_MYSQL_READ_ONLY_USER }} -h ${DB_HOST} --enable-cleartext-plugin --ssl-ca=/edx/bin/rds-combined-ca-bundle.pem --password=$TOKEN {{ item.db_name }} +else + echo "USAGE:" + echo " Generates an auth token:" + echo " /edx/bin/db_iam_auth_script generate-db-auth-token" + echo " Connect to a db with IAM auth token:" + echo " /edx/bin/db_iam_auth_script" +fi diff --git a/playbooks/roles/ad_hoc_reporting/templates/etc/mongorc.js.j2 b/playbooks/roles/ad_hoc_reporting/templates/etc/mongorc.js.j2 new file mode 100644 index 00000000000..dd9f47755ad --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/templates/etc/mongorc.js.j2 @@ -0,0 +1,7 @@ +// we only ever connect to secondaries, avoid people needing to remember to type this +rs.secondaryOk(); + +// This uses the DB name rather than the replica set, which I think is more useful +var prompt = function() { + return db.getName() + "> "; +} diff --git a/playbooks/roles/add_user/defaults/main.yml b/playbooks/roles/add_user/defaults/main.yml new file mode 100644 index 00000000000..c8f1ec72758 --- /dev/null +++ 
b/playbooks/roles/add_user/defaults/main.yml @@ -0,0 +1,32 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Example play: +# +# Rather than being included in the play, this role +# is included as a dependency by other roles in the meta/main.yml +# file. The including role should add the following +# dependency definition. +# +# dependencies: +# - role: add_user +# user_name: edx-themes +# user_home: /edx/etc/edx-themes +# group_name: edx-themes +# dirs: +# - {path: /edx/var/edx-themes, owner: 'edx-themes', group: "edx-themes", mode: "0646"} +# - {path: /edx/etc/edx-themes, owner: 'edx-themes', group: "edx-themes", mode: "0664"} +# - ... +# +## +# Defaults for role add_user +# +# +# +dirs: [] diff --git a/playbooks/roles/add_user/meta/main.yml b/playbooks/roles/add_user/meta/main.yml new file mode 100644 index 00000000000..8a472fb05c4 --- /dev/null +++ b/playbooks/roles/add_user/meta/main.yml @@ -0,0 +1,15 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role add_user +# + +# Allow this role to be duplicated in dependencies +allow_duplicates: yes diff --git a/playbooks/roles/add_user/tasks/main.yml b/playbooks/roles/add_user/tasks/main.yml new file mode 100644 index 00000000000..160b149cab6 --- /dev/null +++ b/playbooks/roles/add_user/tasks/main.yml @@ -0,0 +1,71 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role add_user +# +# Overview: +# +# This role performs the repetitive tasks that most edX roles +# require in our default configuration. + +# create groups for the user +- name: create user groups + group: + name: "{{ item }}" + state: present + with_items: + - "{{ user_name }}" + tags: + - install + - install:base + +# Generating an ssh key so users can do a git +# clone over ssh for public repositories without any +# additional configuration +- name: create application user + user: + name: "{{ user_name }}" + home: "{{ user_home }}" + group: "{{ group_name }}" + createhome: yes + shell: /bin/false + generate_ssh_key: yes + tags: + - install + - install:base + +# Assumes that the home directory has been created above. +# In some cases(vagrant boxes) the home directory gets created +# but does not have the correct owner and group. In vagrant for +# example we were seeing it defaulting to `root` for both. +# Here we ensure that the ownership +# of the home directory is always correct before proceeding. 
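+# A minimal usage sketch, mirroring the dependency example in this role's
+# defaults (the names below are illustrative, not defaults from this repo);
+# after the user and home directory exist, the ownership fix-up task that
+# follows runs:
+#
+#   dependencies:
+#     - role: add_user
+#       user_name: myservice
+#       group_name: myservice
+#       user_home: /edx/app/myservice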
+- name: ensure correct ownership of home directory + file: + path: "{{ user_home }}" + state: directory + owner: "{{ user_name }}" + group: "{{ group_name }}" + tags: + - install + - install:base + +- name: create dirs for the user + file: + path: "{{ item.path }}" + state: directory + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode | default('0755') }}" + with_items: "{{ dirs }}" + tags: + - install + - install:base diff --git a/playbooks/roles/aide/defaults/main.yml b/playbooks/roles/aide/defaults/main.yml new file mode 100644 index 00000000000..4bf919c84a6 --- /dev/null +++ b/playbooks/roles/aide/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +AIDE_REPORT_EMAIL: 'root' diff --git a/playbooks/roles/aide/tasks/main.yml b/playbooks/roles/aide/tasks/main.yml new file mode 100644 index 00000000000..ca622bfe0af --- /dev/null +++ b/playbooks/roles/aide/tasks/main.yml @@ -0,0 +1,28 @@ +--- +# install and configure aide IDS +# +- name: Install aide + apt: + name: aide + state: present + +- name: Configure aide defaults + template: + src: etc/default/aide.j2 + dest: /etc/default/aide + owner: root + group: root + mode: "0644" + +- name: Open read permissions on aide logs + file: + name: /var/log/aide + recurse: yes + state: directory + mode: "0755" + +- name: Aide initial scan (this can take a long time) + command: "aideinit -y -f" + args: + creates: "/var/lib/aide/aide.db" + become: yes \ No newline at end of file diff --git a/playbooks/roles/aide/templates/etc/default/aide.j2 b/playbooks/roles/aide/templates/etc/default/aide.j2 new file mode 100644 index 00000000000..b5d2c1671e3 --- /dev/null +++ b/playbooks/roles/aide/templates/etc/default/aide.j2 @@ -0,0 +1,82 @@ +# These settings are mainly for the wrapper scripts around aide, +# such as aideinit and /etc/cron.daily/aide + +# send reports to syslog + +REPORT_URL=syslog:LOG_LOCAL1 + +# This is used as the host name in the AIDE reports that are sent out +# via e-mail. It defaults to the output of $(hostname --fqdn), but can +# be set to arbitrary values. +# FQDN= + +# This is used as the subject for the e-mail reports. +# If your mail system only threads by subject, you might want to add +# some variable content here (for example $(date +%Y-%m-%d)). +MAILSUBJ="Daily AIDE report for $FQDN" + + +# This is the email address reports get mailed to +# default is root +# This variable is expanded before it is used, so you can use variables +# here. For example, MAILTO=$FQDN-aide@domain.example will send the +# report to host.name.example-aide@domain.example is the local FQDN is +# host.name.example. +MAILTO={{ AIDE_REPORT_EMAIL }} + +# Set this to yes to suppress mailings when no changes have been +# detected during the AIDE run and no error output was given. +#QUIETREPORTS=no + +# This parameter defines which AIDE command to run from the cron script. +# Sensible values are "update" and "check". +# Default is "check", ensuring backwards compatibility. +# Since "update" does not take any longer, it is recommended to use "update", +# so that a new database is created every day. The new database needs to be +# manually copied over the current one, though. +COMMAND=update + +# This parameter defines what to do with a new database created by +# COMMAND=update. It is ignored if COMMAND!=update. +# no: Do not copy new database to old database. This is the default. +# yes: Copy new database to old database. This means that changes to the +# file system are only reported once. Possibly dangerous. 
+# ifnochange: Copy new database to old database if no changes have +# been reported. This is needed for ANF/ARF to work reliably. +COPYNEWDB=no + +# Set this to yes to truncate the detailed changes part in the mail. The full +# output will still be listed in the log file. +TRUNCATEDETAILS=yes + +# Set this to yes to suppress file changes by package and security +# updates from appearing in the e-mail report. Filtered file changes will +# still be listed in the log file. This option parses the /var/log/dpkg.log +# file and implies TRUNCATEDETAILS=yes +FILTERUPDATES=yes + +# Set this to yes to suppress file changes by package installations +# from appearing in the e-mail report. Filtered file changes will still +# be listed in the log file. This option parses the /var/log/dpkg.log file and +# implies TRUNCATEDETAILS=yes. +FILTERINSTALLATIONS=yes + +# This parameter defines how many lines to return per e-mail. Output longer +# than this value will be truncated in the e-mail sent out. +# Set value to "0" to disable this option. +LINES=1000 + +# This parameter gives a grep regular expression. If given, all output lines +# that _don't_ match the regexp are listed first in the script's output. This +# allows to easily remove noise from the AIDE report. +NOISE="" + +# This parameter defines which options are given to aide in the daily +# cron job. The default is "-V4". +AIDEARGS="" + +# These parameters control update-aide.conf and give the defaults for +# the --confdir, --confd and --settingsd options +# UPAC_CONFDIR="/etc/aide" +# UPAC_CONFD="$UPAC_CONFDIR/aide.conf.d" +# UPAC_SETTINGSD="$UPAC_CONFDIR/aide.settings.d" diff --git a/playbooks/roles/analytics-server/defaults/main.yml b/playbooks/roles/analytics-server/defaults/main.yml deleted file mode 100644 index ca7eab0e1cd..00000000000 --- a/playbooks/roles/analytics-server/defaults/main.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# Vars for role analytics-server -# vars are namespace with the module name. -# - -AS_DB_ANALYTICS_PASSWORD: 'CHANGEME!' -AS_DB_ANALYTICS_USER: 'analytics001' -AS_DB_ANALYTICS_HOST: 'localhost' -AS_SERVER_PORT: '9000' -AS_ENV_LANG: 'en_US.UTF-8' -AS_LOG_LEVEL: 'INFO' -AS_WORKERS: '4' - -DATABASES: - default: &databases_default - ENGINE: 'django.db.backends.mysql' - NAME: 'wwc' - USER: 'analytics001' - PASSWORD: 'CHANGEME!' 
- HOST: 'CHANGEME' - PORT: 3306 - -analytics_auth_config: - DATABASES: - analytics: - <<: *databases_default - USER: $AS_DB_ANALYTICS_USER - PASSWORD: $AS_DB_ANALYTICS_PASSWORD - HOST: $AS_DB_ANALYTICS_HOST - ANALYTICS_API_KEY: $AS_API_KEY - ANALYTICS_RESULTS_DB: - MONGO_URI: $AS_DB_RESULTS_URL - MONGO_DB: $AS_DB_RESULTS_DB - MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION - -as_role_name: "analytics-server" -as_user: "analytics-server" -as_home: "/opt/wwc/analytics-server" -as_venv_dir: "{{ as_home }}/virtualenvs/analytics-server" -as_source_repo: "git@github.com:edx/analytics-server.git" -as_code_dir: "{{ as_home }}/src" -as_version: "master" -as_git_identity_path: "{{ secure_dir }}/files/git-identity" -as_git_identity_dest: "/etc/{{ as_role_name }}.git-identity" -as_git_ssh: "/tmp/{{ as_role_name }}.git_ssh.sh" -as_requirements_file: "{{ as_code_dir }}/requirements.txt" -as_rsyslog_enabled: "yes" -as_web_user: "www-data" -as_env: "analytics-server_env" -as_service_variant: 'analytics' -as_django_settings: 'anserv.settings' - -as_env_vars: - ANALYTICS_SERVER_LOG_LEVEL: "{{ AS_LOG_LEVEL }}" - -# -# Used by the included role, automated. -# See meta/main.yml -# -as_automated_rbash_links: - - /usr/bin/sudo - - /usr/bin/scp - -# -# OS packages -# - -as_debian_pkgs: - - mongodb-clients - - zip - - libmysqlclient-dev - -as_redhat_pkgs: - - zip - - community-mysql-libs - -# -# Installed via pip to get the IAM role feature. -# -as_pip_pkgs: - - git+https://github.com/s3tools/s3cmd.git#egg=s3cmd diff --git a/playbooks/roles/analytics-server/files/etc/sudoers.d/99-automator-analytics-server b/playbooks/roles/analytics-server/files/etc/sudoers.d/99-automator-analytics-server deleted file mode 100644 index 50f20a4b344..00000000000 --- a/playbooks/roles/analytics-server/files/etc/sudoers.d/99-automator-analytics-server +++ /dev/null @@ -1 +0,0 @@ -automator ALL=(www-data) NOPASSWD:SETENV:/opt/wwc/analytics-server/virtualenvs/analytics-server/bin/django-admin.py run_all_queries * diff --git a/playbooks/roles/analytics-server/files/git_ssh.sh b/playbooks/roles/analytics-server/files/git_ssh.sh deleted file mode 100644 index ef0bc615e57..00000000000 --- a/playbooks/roles/analytics-server/files/git_ssh.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i /etc/git-identity "$@" diff --git a/playbooks/roles/analytics-server/handlers/main.yml b/playbooks/roles/analytics-server/handlers/main.yml deleted file mode 100644 index 4f8715216bd..00000000000 --- a/playbooks/roles/analytics-server/handlers/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role analytics-server -# -# Overview: -# -# - -- name: stop the analytics service - service: name=analytics state=stopped - -- name: start the analytics service - service: name=analytics state=started diff --git a/playbooks/roles/analytics-server/meta/main.yml b/playbooks/roles/analytics-server/meta/main.yml deleted file mode 100644 index 1c5dd708c47..00000000000 --- a/playbooks/roles/analytics-server/meta/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -dependencies: - - { - role: automated, - automated_rbash_links: $as_automated_rbash_links, - autmoated_sudoers_dest: '99-automator-analytics-server', - 
automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2' - } - - diff --git a/playbooks/roles/analytics-server/tasks/deploy.yml b/playbooks/roles/analytics-server/tasks/deploy.yml deleted file mode 100644 index 34094e6efe0..00000000000 --- a/playbooks/roles/analytics-server/tasks/deploy.yml +++ /dev/null @@ -1,80 +0,0 @@ -# -# TODO: Needed while this repo is private -# -- name: upload ssh script - template: - src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }} - force=yes owner=root group=adm mode=750 - tags: - - analytics-server - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: install read-only ssh key required for checkout - copy: - src={{ as_git_identity_path }} dest={{ as_git_identity_dest }} - force=yes owner=ubuntu group=adm mode=0600 - tags: - - analytics-server - - install - - update - -- name: checkout code - git: - dest={{ as_code_dir }} repo={{ as_source_repo }} - version={{ as_version }} force=true - environment: - GIT_SSH: $as_git_ssh - notify: restart the analytics service - notify: start the analytics service - tags: - - analytics-server - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: update src permissions - file: - path={{ as_code_dir }} state=directory owner={{ as_user }} - group={{ as_web_user }} mode=2750 recurse=yes - tags: - - analytics-server - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: remove read-only ssh key for the content repo - file: path={{ as_git_identity_dest }} state=absent - tags: - - analytics-server - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: remove ssh script - file: path={{ as_git_ssh }} state=absent - tags: - - analytics-server - - install - - update - -- name: install application requirements - pip: - requirements={{ as_requirements_file }} - virtualenv={{ as_venv_dir }} state=present - sudo: true - sudo_user: "{{ as_user }}" - notify: start the analytics service - tags: - - analytics-server - - install - - update diff --git a/playbooks/roles/analytics-server/tasks/main.yml b/playbooks/roles/analytics-server/tasks/main.yml deleted file mode 100644 index 1a7472b4b1c..00000000000 --- a/playbooks/roles/analytics-server/tasks/main.yml +++ /dev/null @@ -1,136 +0,0 @@ ---- - -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role analytics-server -# -# Overview: -# -# Installs the edX analytics-server Django application which provides -# basic analytics to the LMS instructor dashboard via service calls. 
-# -# Dependencies: -# -# common role -# -# Depends upon the automated role -# -# Example play: -# -# - name: Configure analytics instance(s) -# hosts: analytics-servers -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/common/common.yml" -# - "{{ secure_dir }}/vars/stage/analytics-server.yml" -# - "{{ secure_dir }}/vars/users.yml" -# gather_facts: True -# roles: -# - common -# - analytics-server -# -- name: install system packages - apt: pkg={{','.join(as_debian_pkgs)}} state=present - tags: - - analytics-server - - install - - update - -- name: create analytics-server user {{ as_user }} - user: - name={{ as_user }} state=present shell=/bin/bash - home={{ as_home }} createhome=yes - tags: - - analytics-server - - install - - update - -- name: setup the analytics-server env - template: - src=opt/wwc/analytics-server/{{ as_env }}.j2 - dest={{ as_home }}/{{ as_env }} - owner="{{ as_user }}" group="{{ as_user }}" - tags: - - analytics-server - - install - - update - -- name: drop a bash_profile - copy: > - src=../../common/files/bash_profile - dest={{ as_home }}/.bash_profile - owner={{ as_user }} - group={{ as_user }} - -# Awaiting next ansible release. -#- name: ensure .bashrc exists -# file: path={{ as_home }}/.bashrc state=touch -# sudo: true -# sudo_user: "{{ as_user }}" -# tags: -# - analytics-server -# - install -# - update - -- name: ensure .bashrc exists - shell: touch {{ as_home }}/.bashrc - sudo: true - sudo_user: "{{ as_user }}" - tags: - - analytics-server - - install - - update - -- name: add source of analytics-server_env to .bashrc - lineinfile: - dest={{ as_home }}/.bashrc - regexp='. {{ as_home }}/analytics-server_env' - line='. {{ as_home }}/analytics_server_env' - tags: - - analytics-server - - install - - update - -- name: add source venv to .bashrc - lineinfile: - dest={{ as_home }}/.bashrc - regexp='. {{ as_venv_dir }}/bin/activate' - line='. 
{{ as_venv_dir }}/bin/activate' - tags: - - analytics-server - - install - - update - -- name: install global python requirements - pip: name={{ item }} - with_items: as_pip_pkgs - tags: - - analytics-server - - install - - update - -- name: create config - template: - src=opt/wwc/analytics.auth.json.j2 - dest=/opt/wwc/analytics.auth.json - mode=0600 - owner="{{ as_web_user }}" group="{{ as_web_user }}" - tags: - - analytics-server - - install - - update - -- name: install service - template: - src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf - owner=root group=root - -- include: deploy.yml tags=deploy diff --git a/playbooks/roles/analytics-server/templates/etc/init/analytics.conf.j2 b/playbooks/roles/analytics-server/templates/etc/init/analytics.conf.j2 deleted file mode 100644 index c060f623580..00000000000 --- a/playbooks/roles/analytics-server/templates/etc/init/analytics.conf.j2 +++ /dev/null @@ -1,21 +0,0 @@ -# {{ ansible_managed }} - -description "Analytics server under gunicorn" - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn -respawn limit 3 30 - -env SERVICE_VARIANT={{ as_service_variant }} -env PID=/var/tmp/analytics.pid -env WORKERS={{ AS_WORKERS }} -env PORT={{ AS_SERVER_PORT }} -env LANG={{ AS_ENV_LANG }} -env DJANGO_SETTINGS_MODULE={{ as_django_settings }} - -chdir {{ as_code_dir }} -setuid {{ as_web_user }} - -exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv anserv.wsgi diff --git a/playbooks/roles/analytics-server/templates/opt/wwc/analytics-server/analytics-server_env.j2 b/playbooks/roles/analytics-server/templates/opt/wwc/analytics-server/analytics-server_env.j2 deleted file mode 100644 index f9cd2df5699..00000000000 --- a/playbooks/roles/analytics-server/templates/opt/wwc/analytics-server/analytics-server_env.j2 +++ /dev/null @@ -1,7 +0,0 @@ -# {{ ansible_managed }} - -{% for name,value in as_env_vars.items() %} -{% if value %} -export {{ name }}="{{ value }}" -{% endif %} -{% endfor %} \ No newline at end of file diff --git a/playbooks/roles/analytics-server/templates/opt/wwc/analytics.auth.json.j2 b/playbooks/roles/analytics-server/templates/opt/wwc/analytics.auth.json.j2 deleted file mode 100644 index 4d3b6760d70..00000000000 --- a/playbooks/roles/analytics-server/templates/opt/wwc/analytics.auth.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ analytics_auth_config | to_nice_json }} diff --git a/playbooks/roles/analytics-server/templates/tmp/analytics-server.git_ssh.sh.j2 b/playbooks/roles/analytics-server/templates/tmp/analytics-server.git_ssh.sh.j2 deleted file mode 100644 index bd6202f71d8..00000000000 --- a/playbooks/roles/analytics-server/templates/tmp/analytics-server.git_ssh.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ as_git_identity_dest }} "$@" diff --git a/playbooks/roles/analytics/defaults/main.yml b/playbooks/roles/analytics/defaults/main.yml deleted file mode 100644 index 6d7a5e4863d..00000000000 --- a/playbooks/roles/analytics/defaults/main.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# Vars for role analytics -# vars are namespace with the module name. -# - -ANALYTICS_DB_ANALYTICS_PASSWORD: 'CHANGEME!' 
-ANALYTICS_DB_ANALYTICS_USER: 'analytics001' -ANALYTICS_DB_ANALYTICS_HOST: 'localhost' -ANALYTICS_SERVER_PORT: '9000' -ANALYTICS_ENV_LANG: 'en_US.UTF-8' -ANALYTICS_LOG_LEVEL: 'INFO' -ANALYTICS_WORKERS: '4' - -DATABASES: - default: &databases_default - ENGINE: 'django.db.backends.mysql' - NAME: 'wwc' - USER: 'analytics001' - PASSWORD: 'CHANGEME!' - HOST: 'CHANGEME' - PORT: 3306 - -analytics_auth_config: - DATABASES: - analytics: - <<: *databases_default - USER: $ANALYTICS_DB_ANALYTICS_USER - PASSWORD: $ANALYTICS_DB_ANALYTICS_PASSWORD - HOST: $ANALYTICS_DB_ANALYTICS_HOST - ANALYTICS_API_KEY: $ANALYTICS_API_KEY - ANALYTICS_RESULTS_DB: - MONGO_URI: $ANALYTICS_DB_RESULTS_URL - MONGO_DB: $ANALYTICS_DB_RESULTS_DB - MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION - -analytics_role_name: "analytics" -analytics_user: "analytics" -analytics_home: "/opt/wwc/analytics" -analytics_venv_dir: "{{ analytics_home }}/virtualenvs/analytics" -analytics_source_repo: "git@github.com:edx/analytics-server.git" -analytics_code_dir: "{{ analytics_home }}/src" -analytics_version: "master" -analytics_git_identity_path: "{{ secure_dir }}/files/git-identity" -analytics_git_identity_dest: "/etc/{{ analytics_role_name }}.git-identity" -analytics_git_ssh: "/tmp/{{ analytics_role_name }}.git_ssh.sh" -analytics_requirements_file: "{{ analytics_code_dir }}/requirements.txt" -analytics_rsyslog_enabled: "yes" -analytics_web_user: "www-data" -analytics_env: "analytics_env" -analytics_service_variant: 'analytics' -analytics_django_settings: 'anserv.settings' - -analytics_env_vars: - ANALYTICS_LOG_LEVEL: "{{ ANALYTICS_LOG_LEVEL }}" - -# -# Used by the included role, automated. -# See meta/main.yml -# -analytics_automated_rbash_links: - - /usr/bin/sudo - - /usr/bin/scp - -# -# OS packages -# - -analytics_debian_pkgs: - - mongodb-clients - - zip - - libmysqlclient-dev - -analytics_redhat_pkgs: - - zip - - community-mysql-libs - -# -# Installed via pip to get the IAM role feature. 
-# -analytics_pip_pkgs: - - git+https://github.com/s3tools/s3cmd.git#egg=s3cmd diff --git a/playbooks/roles/analytics/handlers/main.yml b/playbooks/roles/analytics/handlers/main.yml deleted file mode 100644 index 9978164f652..00000000000 --- a/playbooks/roles/analytics/handlers/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role analytics -# -# Overview: -# -# - -- name: stop the analytics service - service: name=analytics state=stopped - -- name: start the analytics service - service: name=analytics state=started diff --git a/playbooks/roles/analytics/tasks/deploy.yml b/playbooks/roles/analytics/tasks/deploy.yml deleted file mode 100644 index 298107251c8..00000000000 --- a/playbooks/roles/analytics/tasks/deploy.yml +++ /dev/null @@ -1,80 +0,0 @@ -# -# TODO: Needed while this repo is private -# -- name: upload ssh script - template: - src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }} - force=yes owner=root group=adm mode=750 - tags: - - analytics - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: install read-only ssh key required for checkout - copy: - src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }} - force=yes owner=ubuntu group=adm mode=0600 - tags: - - analytics - - install - - update - -- name: checkout code - git: - dest={{ analytics_code_dir }} repo={{ analytics_source_repo }} - version={{ analytics_version }} force=true - environment: - GIT_SSH: $analytics_git_ssh - notify: restart the analytics service - notify: start the analytics service - tags: - - analytics - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: update src permissions - file: - path={{ analytics_code_dir }} state=directory owner={{ analytics_user }} - group={{ analytics_web_user }} mode=2750 recurse=yes - tags: - - analytics - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: remove read-only ssh key for the content repo - file: path={{ analytics_git_identity_dest }} state=absent - tags: - - analytics - - install - - update - -# -# TODO: Needed while this repo is private -# -- name: remove ssh script - file: path={{ analytics_git_ssh }} state=absent - tags: - - analytics - - install - - update - -- name: install application requirements - pip: - requirements={{ analytics_requirements_file }} - virtualenv={{ analytics_venv_dir }} state=present - sudo: true - sudo_user: "{{ analytics_user }}" - notify: start the analytics service - tags: - - analytics - - install - - update diff --git a/playbooks/roles/analytics/tasks/main.yml b/playbooks/roles/analytics/tasks/main.yml deleted file mode 100644 index e1bf2626339..00000000000 --- a/playbooks/roles/analytics/tasks/main.yml +++ /dev/null @@ -1,136 +0,0 @@ ---- - -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role analytics -# -# Overview: -# -# Installs the edX analytics Django application which provides -# basic analytics to the LMS instructor dashboard via service 
calls. -# -# Dependencies: -# -# common role -# -# Depends upon the automated role -# -# Example play: -# -# - name: Configure analytics instance(s) -# hosts: analyticss -# sudo: True -# vars_files: -# - "{{ secure_dir }}/vars/common/common.yml" -# - "{{ secure_dir }}/vars/stage/analytics.yml" -# - "{{ secure_dir }}/vars/users.yml" -# gather_facts: True -# roles: -# - common -# - analytics -# -- name: install system packages - apt: pkg={{','.join(analytics_debian_pkgs)}} state=present - tags: - - analytics - - install - - update - -- name: create analytics user {{ analytics_user }} - user: - name={{ analytics_user }} state=present shell=/bin/bash - home={{ analytics_home }} createhome=yes - tags: - - analytics - - install - - update - -- name: setup the analytics env - template: - src=opt/wwc/analytics/{{ analytics_env }}.j2 - dest={{ analytics_home }}/{{ analytics_env }} - owner="{{ analytics_user }}" group="{{ analytics_user }}" - tags: - - analytics - - install - - update - -- name: drop a bash_profile - copy: > - src=../../common/files/bash_profile - dest={{ analytics_home }}/.bash_profile - owner={{ analytics_user }} - group={{ analytics_user }} - -# Awaiting next ansible release. -#- name: ensure .bashrc exists -# file: path={{ analytics_home }}/.bashrc state=touch -# sudo: true -# sudo_user: "{{ analytics_user }}" -# tags: -# - analytics -# - install -# - update - -- name: ensure .bashrc exists - shell: touch {{ analytics_home }}/.bashrc - sudo: true - sudo_user: "{{ analytics_user }}" - tags: - - analytics - - install - - update - -- name: add source of analytics_env to .bashrc - lineinfile: - dest={{ analytics_home }}/.bashrc - regexp='. {{ analytics_home }}/analytics_env' - line='. {{ analytics_home }}/analytics_env' - tags: - - analytics - - install - - update - -- name: add source venv to .bashrc - lineinfile: - dest={{ analytics_home }}/.bashrc - regexp='. {{ analytics_venv_dir }}/bin/activate' - line='. 
{{ analytics_venv_dir }}/bin/activate' - tags: - - analytics - - install - - update - -- name: install global python requirements - pip: name={{ item }} - with_items: analytics_pip_pkgs - tags: - - analytics - - install - - update - -- name: create config - template: - src=opt/wwc/analytics.auth.json.j2 - dest=/opt/wwc/analytics.auth.json - mode=0600 - owner="{{ analytics_web_user }}" group="{{ analytics_web_user }}" - tags: - - analytics - - install - - update - -- name: install service - template: - src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf - owner=root group=root - -- include: deploy.yml tags=deploy diff --git a/playbooks/roles/analytics/templates/etc/init/analytics.conf.j2 b/playbooks/roles/analytics/templates/etc/init/analytics.conf.j2 deleted file mode 100644 index f1e797a240f..00000000000 --- a/playbooks/roles/analytics/templates/etc/init/analytics.conf.j2 +++ /dev/null @@ -1,21 +0,0 @@ -# {{ ansible_managed }} - -description "Analytics gunicorn" - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn -respawn limit 3 30 - -env SERVICE_VARIANT={{ analytics_service_variant }} -env PID=/var/tmp/analytics.pid -env WORKERS={{ ANALYTICS_WORKERS }} -env PORT={{ ANALYTICS_SERVER_PORT }} -env LANG={{ ANALYTICS_ENV_LANG }} -env DJANGO_SETTINGS_MODULE={{ analytics_django_settings }} - -chdir {{ analytics_code_dir }} -setuid {{ analytics_web_user }} - -exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv anserv.wsgi diff --git a/playbooks/roles/analytics/templates/etc/sudoers.d/99-automator-analytics.j2 b/playbooks/roles/analytics/templates/etc/sudoers.d/99-automator-analytics.j2 deleted file mode 100644 index cd5ee5742ad..00000000000 --- a/playbooks/roles/analytics/templates/etc/sudoers.d/99-automator-analytics.j2 +++ /dev/null @@ -1 +0,0 @@ -automator ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries * diff --git a/playbooks/roles/analytics/templates/opt/wwc/analytics.auth.json.j2 b/playbooks/roles/analytics/templates/opt/wwc/analytics.auth.json.j2 deleted file mode 100644 index 4d3b6760d70..00000000000 --- a/playbooks/roles/analytics/templates/opt/wwc/analytics.auth.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ analytics_auth_config | to_nice_json }} diff --git a/playbooks/roles/analytics/templates/opt/wwc/analytics/analytics_env.j2 b/playbooks/roles/analytics/templates/opt/wwc/analytics/analytics_env.j2 deleted file mode 100644 index 3031abf6dfc..00000000000 --- a/playbooks/roles/analytics/templates/opt/wwc/analytics/analytics_env.j2 +++ /dev/null @@ -1,7 +0,0 @@ -# {{ ansible_managed }} - -{% for name,value in analytics_env_vars.items() %} -{% if value %} -export {{ name }}="{{ value }}" -{% endif %} -{% endfor %} diff --git a/playbooks/roles/analytics/templates/tmp/analytics.git_ssh.sh.j2 b/playbooks/roles/analytics/templates/tmp/analytics.git_ssh.sh.j2 deleted file mode 100644 index b631a61f5cc..00000000000 --- a/playbooks/roles/analytics/templates/tmp/analytics.git_ssh.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ analytics_git_identity_dest }} "$@" diff --git a/playbooks/roles/analytics_api/defaults/main.yml b/playbooks/roles/analytics_api/defaults/main.yml new file mode 100644 index 00000000000..c2f05cc51f9 --- /dev/null +++ b/playbooks/roles/analytics_api/defaults/main.yml @@ -0,0 +1,192 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: 
https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role analytics_api +# + +ANALYTICS_API_GIT_IDENTITY: !!null + +# +# vars are namespace with the module name +# + +analytics_api_service_name: "analytics_api" +analytics_api_gunicorn_port: "8100" + +ANALYTICS_API_DJANGO_SETTINGS_MODULE: "analyticsdataserver.settings.production" + +analytics_api_environment: + ANALYTICS_API_CFG: "{{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yml" + DJANGO_SETTINGS_MODULE: "{{ ANALYTICS_API_DJANGO_SETTINGS_MODULE }}" + +analytics_api_home: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}" +analytics_api_user: "{{ analytics_api_service_name }}" +analytics_api_code_dir: "{{ analytics_api_home }}/{{ analytics_api_service_name }}" + +analytics_api_wsgi_name: "analyticsdataserver" +analytics_api_hostname: "analytics-api" + +analytics_api_newrelic_appname: 'analytics-api' + +# +# OS packages +# +analytics_api_debian_pkgs: + - 'libmysqlclient-dev' + +ANALYTICS_API_USE_PYTHON38: True + +ANALYTICS_API_VERSION: "master" +ANALYTICS_API_NGINX_PORT: '1{{ analytics_api_gunicorn_port }}' +ANALYTICS_API_SSL_NGINX_PORT: '4{{ analytics_api_gunicorn_port }}' + +ANALYTICS_API_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: edx-analytics-data-api.git + VERSION: "{{ ANALYTICS_API_VERSION }}" + DESTINATION: "{{ analytics_api_code_dir }}" + SSH_KEY: "{{ ANALYTICS_API_GIT_IDENTITY }}" + + +ANALYTICS_API_SECRET_KEY: 'Your secret key here' +ANALYTICS_API_AUTH_TOKEN: 'put-your-api-token-here' + +ANALYTICS_API_DEFAULT_DB_NAME: 'analytics-api' +ANALYTICS_API_DEFAULT_USER: 'api001' +ANALYTICS_API_DEFAULT_PASSWORD: 'password' +ANALYTICS_API_DEFAULT_HOST: 'localhost' +ANALYTICS_API_DEFAULT_PORT: '3306' +ANALYTICS_API_DEFAULT_MYSQL_OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + +ANALYTICS_API_REPORTS_DB_NAME: 'reports' +ANALYTICS_API_REPORTS_USER: 'reports001' +ANALYTICS_API_REPORTS_PASSWORD: 'password' +ANALYTICS_API_REPORTS_HOST: 'localhost' +ANALYTICS_API_REPORTS_PORT: '3306' +ANALYTICS_API_REPORTS_MYSQL_OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + +ANALYTICS_API_DATABASES: + # rw user + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ ANALYTICS_API_DEFAULT_DB_NAME }}' + USER: '{{ ANALYTICS_API_DEFAULT_USER }}' + PASSWORD: '{{ ANALYTICS_API_DEFAULT_PASSWORD }}' + HOST: '{{ ANALYTICS_API_DEFAULT_HOST }}' + PORT: '{{ ANALYTICS_API_DEFAULT_PORT }}' + OPTIONS: "{{ ANALYTICS_API_DEFAULT_MYSQL_OPTIONS }}" + # read-only user + reports: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ ANALYTICS_API_REPORTS_DB_NAME }}' + USER: '{{ ANALYTICS_API_REPORTS_USER }}' + PASSWORD: '{{ ANALYTICS_API_REPORTS_PASSWORD }}' + HOST: '{{ ANALYTICS_API_REPORTS_HOST }}' + PORT: '{{ ANALYTICS_API_REPORTS_PORT }}' + OPTIONS: "{{ ANALYTICS_API_REPORTS_MYSQL_OPTIONS }}" + +# This CONFIG_ALIAS needs to match the key in the above DATABASES config +ANALYTICS_DB_CONFIG_ALIAS: 'reports' + +ANALYTICS_API_ELASTICSEARCH_LEARNERS_HOST: 'localhost' +ANALYTICS_API_ELASTICSEARCH_LEARNERS_INDEX: 'roster_1_2' +ANALYTICS_API_ELASTICSEARCH_LEARNERS_INDEX_ALIAS: 'roster_1_2' +ANALYTICS_API_ELASTICSEARCH_LEARNERS_UPDATE_INDEX: 'index_updates' +ANALYTICS_API_ELASTICSEARCH_CONNECTION_CLASS: !!null 
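+# A minimal override sketch for pointing learner analytics at a real
+# Elasticsearch cluster; the host and index names below are placeholders:
+#
+# ANALYTICS_API_ELASTICSEARCH_LEARNERS_HOST: 'https://es.example.com:9200/'
+# ANALYTICS_API_ELASTICSEARCH_LEARNERS_INDEX: 'roster_1_2'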
+ANALYTICS_API_ELASTICSEARCH_AWS_ACCESS_KEY_ID: !!null +ANALYTICS_API_ELASTICSEARCH_AWS_SECRET_ACCESS_KEY: !!null +ANALYTICS_API_ELASTICSEARCH_CONNECTION_DEFAULT_REGION: 'us-east-1' +ANALYTICS_API_DATE_FORMAT: '%Y-%m-%d' +ANALYTICS_API_DATETIME_FORMAT: '%Y-%m-%dT%H%M%S' +ANALYTICS_API_DEFAULT_PAGE_SIZE: 25 +ANALYTICS_API_MAX_PAGE_SIZE: 100 +ANALYTICS_API_AGGREGATE_PAGE_SIZE: 10 + +# Example settings to use Amazon S3 as a storage backend for course reports. +# +# This storage mechanism also provides support for pre-authenticated URLs with an expiry time, +# allowing temporary report downloads from secured S3 file locations. +# +# ANALYTICS_API_REPORT_DOWNLOAD_BACKEND: +# DEFAULT_FILE_STORAGE: 'storages.backends.s3boto3.S3Boto3Storage' +# AWS_ACCESS_KEY_ID: 'put-your-access-key-id-here' +# AWS_SECRET_ACCESS_KEY: 'put-your-secret-access-key-here' +# AWS_STORAGE_BUCKET_NAME: 'report-download-bucket' +# COURSE_REPORT_FILE_LOCATION_TEMPLATE: '/{course_id}_{report_name}.csv' +# COURSE_REPORT_DOWNLOAD_EXPIRY_TIME: 120 +# +# By default, instead we use the local filesystem. +# +# Other storage providers can be used, as long as they provide the .url() method. + +ANALYTICS_API_BASE_URL: 'http://localhost:8100/' +ANALYTICS_API_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}' +ANALYTICS_API_MEDIA_ROOT: '{{ ANALYTICS_API_DATA_DIR }}/static/reports' +ANALYTICS_API_MEDIA_URL: '/static/reports/' + +ANALYTICS_API_REPORT_DOWNLOAD_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ ANALYTICS_API_MEDIA_ROOT }}' + MEDIA_URL: '{{ ANALYTICS_API_BASE_URL }}{{ ANALYTICS_API_MEDIA_URL }}' + COURSE_REPORT_FILE_LOCATION_TEMPLATE: '{course_id}_{report_name}.csv' + +ANALYTICS_API_CSRF_COOKIE_SECURE: false + +ANALYTICS_API_ALLOW_CORS_HEADERS: true +ANALYTICS_API_ALLOW_CORS_CREDENTIALS: true +ANALYTICS_API_CORS_ORIGIN_WHITELIST: [] +ANALYTICS_API_BASIC_AUTH_EXEMPTED_PATHS: + - 'enterprise' + +analytics_api_service_config_overrides: + API_AUTH_TOKEN: '{{ ANALYTICS_API_AUTH_TOKEN }}' + STATICFILES_DIRS: ['static'] + STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles" + LMS_BASE_URL: "{{ ANALYTICS_API_LMS_BASE_URL | default('http://127.0.0.1:8000/') }}" + + # db config + ANALYTICS_DATABASE: '{{ ANALYTICS_DB_CONFIG_ALIAS }}' + DATABASES: '{{ ANALYTICS_API_DATABASES }}' + ELASTICSEARCH_LEARNERS_HOST: '{{ ANALYTICS_API_ELASTICSEARCH_LEARNERS_HOST }}' + ELASTICSEARCH_LEARNERS_INDEX: '{{ ANALYTICS_API_ELASTICSEARCH_LEARNERS_INDEX }}' + ELASTICSEARCH_LEARNERS_INDEX_ALIAS: '{{ ANALYTICS_API_ELASTICSEARCH_LEARNERS_INDEX_ALIAS }}' + ELASTICSEARCH_LEARNERS_UPDATE_INDEX: '{{ ANALYTICS_API_ELASTICSEARCH_LEARNERS_UPDATE_INDEX }}' + ELASTICSEARCH_CONNECTION_CLASS: '{{ ANALYTICS_API_ELASTICSEARCH_CONNECTION_CLASS }}' + ELASTICSEARCH_AWS_ACCESS_KEY_ID: '{{ ANALYTICS_API_ELASTICSEARCH_AWS_ACCESS_KEY_ID }}' + ELASTICSEARCH_AWS_SECRET_ACCESS_KEY: '{{ ANALYTICS_API_ELASTICSEARCH_AWS_SECRET_ACCESS_KEY }}' + ELASTICSEARCH_CONNECTION_DEFAULT_REGION: '{{ ANALYTICS_API_ELASTICSEARCH_CONNECTION_DEFAULT_REGION }}' + DATE_FORMAT: '{{ ANALYTICS_API_DATE_FORMAT }}' + DATETIME_FORMAT: '{{ ANALYTICS_API_DATETIME_FORMAT }}' + DEFAULT_PAGE_SIZE: '{{ ANALYTICS_API_DEFAULT_PAGE_SIZE }}' + MAX_PAGE_SIZE: '{{ ANALYTICS_API_MAX_PAGE_SIZE }}' + AGGREGATE_PAGE_SIZE: '{{ ANALYTICS_API_AGGREGATE_PAGE_SIZE }}' + REPORT_DOWNLOAD_BACKEND: '{{ ANALYTICS_API_REPORT_DOWNLOAD_BACKEND }}' + CSRF_COOKIE_SECURE: "{{ ANALYTICS_API_CSRF_COOKIE_SECURE }}" + +# Default 
dummy user, override this!! +ANALYTICS_API_USERS: + "dummy-api-user": "changeme" + +ANALYTICS_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +ANALYTICS_API_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ANALYTICS_API_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ANALYTICS_API_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# See edx_django_service_automated_users for an example of what this should be +ANALYTICS_API_AUTOMATED_USERS: {} diff --git a/playbooks/roles/analytics_api/meta/main.yml b/playbooks/roles/analytics_api/meta/main.yml new file mode 100644 index 00000000000..7ab6f3f95ae --- /dev/null +++ b/playbooks/roles/analytics_api/meta/main.yml @@ -0,0 +1,53 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Role includes for role analytics_api +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ ANALYTICS_API_USE_PYTHON38 }}' + edx_django_service_repos: '{{ ANALYTICS_API_REPOS }}' + edx_django_service_name: '{{ analytics_api_service_name }}' + edx_django_service_user: '{{ analytics_api_user }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}' + edx_django_service_config_overrides: '{{ analytics_api_service_config_overrides }}' + edx_django_service_nginx_port: '{{ ANALYTICS_API_NGINX_PORT }}' + edx_django_service_nginx_read_timeout: 300 + edx_django_service_ssl_nginx_port: '{{ ANALYTICS_API_SSL_NGINX_PORT }}' + edx_django_service_default_db_host: '{{ ANALYTICS_API_DEFAULT_HOST }}' + edx_django_service_default_db_name: '{{ ANALYTICS_API_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ ANALYTICS_API_DEFAULT_USER }}' + edx_django_service_db_password: '{{ ANALYTICS_API_DEFAULT_PASSWORD }}' + edx_django_service_debian_pkgs_extra: '{{ analytics_api_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ analytics_api_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ ANALYTICS_API_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ analytics_api_environment }}' + edx_django_service_secret_key: '{{ ANALYTICS_API_SECRET_KEY }}' + edx_django_service_wsgi_name: '{{ analytics_api_wsgi_name }}' + edx_django_service_hostname: '~^((stage|prod)-)?{{ analytics_api_hostname }}.*' + edx_django_service_newrelic_appname: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ analytics_api_newrelic_appname }}' + edx_django_service_automated_users: '{{ ANALYTICS_API_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ ANALYTICS_API_CORS_ORIGIN_WHITELIST }}' + edx_django_service_allow_cors_headers: '{{ ANALYTICS_API_ALLOW_CORS_HEADERS }}' + edx_django_service_allow_cors_credentials: '{{ ANALYTICS_API_ALLOW_CORS_CREDENTIALS }}' + edx_django_service_basic_auth_exempted_paths_extra: '{{ ANALYTICS_API_BASIC_AUTH_EXEMPTED_PATHS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ ANALYTICS_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ ANALYTICS_API_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ ANALYTICS_API_COPY_CONFIG_ENABLED }}' diff 
--git a/playbooks/roles/analytics_api/tasks/main.yml b/playbooks/roles/analytics_api/tasks/main.yml new file mode 100644 index 00000000000..4f460d56d8f --- /dev/null +++ b/playbooks/roles/analytics_api/tasks/main.yml @@ -0,0 +1,27 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Tasks for role analytics_api +# +# Overview: Most of this role's tasks come from edx_django_service. +# +# +# Dependencies: +# + +- name: create api users + shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}" + args: + chdir: "{{ analytics_api_code_dir }}" + become_user: "{{ analytics_api_user }}" + environment: "{{ analytics_api_environment }}" + with_dict: "{{ ANALYTICS_API_USERS }}" + tags: + - manage + - manage:app-users diff --git a/playbooks/roles/analytics_pipeline/defaults/main.yml b/playbooks/roles/analytics_pipeline/defaults/main.yml new file mode 100644 index 00000000000..c36fed2feb9 --- /dev/null +++ b/playbooks/roles/analytics_pipeline/defaults/main.yml @@ -0,0 +1,55 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role analytics_pipeline +# + +ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER: pipeline001 +ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD: password +ANALYTICS_PIPELINE_OUTPUT_DATABASE_HOST: localhost +ANALYTICS_PIPELINE_OUTPUT_DATABASE_PORT: 3306 + +ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME: "{{ ANALYTICS_API_REPORTS_DB_NAME }}" +ANALYTICS_PIPELINE_OUTPUT_DATABASE: + username: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER }}" + password: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD }}" + host: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_HOST }}" + port: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PORT }}" + +ANALYTICS_PIPELINE_INPUT_DATABASE: + username: "{{ COMMON_ANALYTICS_MYSQL_READ_ONLY_USER }}" + password: "{{ COMMON_ANALYTICS_MYSQL_READ_ONLY_PASS }}" + host: localhost + port: 3306 + +ANALYTICS_PIPELINE_CONFIG_DIR: "{{ COMMON_CFG_DIR }}/edx-analytics-pipeline" +ANALYTICS_PIPELINE_HDFS_DATA_DIR: "hdfs://localhost:9000/data" + +ANALYTICS_PIPELINE_LUIGI_HADOOP_VERSION: cdh4 +ANALYTICS_PIPELINE_LUIGI_HADOOP_COMMAND: "{{ HADOOP_COMMON_HOME }}/bin/hadoop" +ANALYTICS_PIPELINE_LUIGI_HADOOP_STREAMING_JAR: "{{ HADOOP_COMMON_HOME }}/share/hadoop/tools/lib/hadoop-streaming-{{ HADOOP_COMMON_VERSION }}.jar" + +# +# vars are namespaced with the module name. 
+# +analytics_pipeline_role_name: analytics_pipeline + +analytics_pipeline_util_library: + path: /var/tmp/edx-analytics-hadoop-util + repo: https://github.com/edx/edx-analytics-hadoop-util + version: master + +# +# OS packages +# + +analytics_pipeline_debian_pkgs: [] + +analytics_pipeline_redhat_pkgs: [] diff --git a/playbooks/roles/analytics_pipeline/files/acceptance.json b/playbooks/roles/analytics_pipeline/files/acceptance.json new file mode 100644 index 00000000000..24247f65fb7 --- /dev/null +++ b/playbooks/roles/analytics_pipeline/files/acceptance.json @@ -0,0 +1,17 @@ +{ + "connection_user": "hadoop", + "credentials_file_url": "/edx/etc/edx-analytics-pipeline/output.json", + "exporter_output_bucket": "", + "geolocation_data": "/var/tmp/geolocation-data.mmdb", + "hive_user": "hadoop", + "host": "localhost", + "identifier": "local-devstack", + "manifest_input_format": "org.edx.hadoop.input.ManifestTextInputFormat", + "oddjob_jar": "hdfs://localhost:9000/edx-analytics-pipeline/packages/edx-analytics-hadoop-util.jar", + "tasks_branch": "origin/HEAD", + "tasks_log_path": "/tmp/acceptance/", + "tasks_output_url": "hdfs://localhost:9000/acceptance-test-output/", + "tasks_repo": "/edx/app/analytics_pipeline/analytics_pipeline", + "vertica_creds_url": "", + "wheel_url": "https://edx-wheelhouse.s3-website-us-east-1.amazonaws.com/Ubuntu/precise" +} diff --git a/playbooks/roles/analytics_pipeline/meta/main.yml b/playbooks/roles/analytics_pipeline/meta/main.yml new file mode 100644 index 00000000000..98bac28b2b7 --- /dev/null +++ b/playbooks/roles/analytics_pipeline/meta/main.yml @@ -0,0 +1,17 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role analytics_pipeline + +dependencies: + - common + - hadoop_master + - hive + - sqoop diff --git a/playbooks/roles/analytics_pipeline/tasks/main.yml b/playbooks/roles/analytics_pipeline/tasks/main.yml new file mode 100644 index 00000000000..65b42d70279 --- /dev/null +++ b/playbooks/roles/analytics_pipeline/tasks/main.yml @@ -0,0 +1,243 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role analytics_pipeline +# +# Overview: +# +# Prepare the machine to run the edX Analytics Data Pipeline. The pipeline currently "installs itself" +# via an ansible playbook that is not included in the edx/configuration repo. However, in order to +# run the pipeline in a devstack environment, some configuration needs to be performed. In a production +# environment many of these config files are stored on S3. 
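+#
+# As a concrete illustration (a sketch using this role's defaults, not real
+# credentials), the "Store output database credentials" task below serializes
+# ANALYTICS_PIPELINE_OUTPUT_DATABASE verbatim to
+# {{ COMMON_CFG_DIR }}/edx-analytics-pipeline/output.json, yielding roughly:
+#
+#   {"username": "pipeline001", "password": "password", "host": "localhost", "port": "3306"}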
+# +# Dependencies: +# +# common: some of the variables from the common role are used here +# hadoop_master: ensures hadoop services are installed +# hive: the pipeline makes extensive usage of hive, so that needs to be installed as well +# sqoop: similarly to hive, the pipeline uses this tool extensively +# +# Example play: +# +# - name: Deploy all dependencies of edx-analytics-pipeline to the node +# hosts: all +# become: True +# gather_facts: True +# roles: +# - analytics_pipeline +# +# ansible-playbook -i 'localhost,' ./analytics_pipeline.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml +# + +- name: Create config directory + file: + path: "{{ ANALYTICS_PIPELINE_CONFIG_DIR }}" + state: directory + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + mode: "0755" + tags: + - install + - install:configuration + +- name: Store output database credentials for analytics pipeline + copy: + content: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE | to_json }}" + dest: "{{ COMMON_CFG_DIR }}/edx-analytics-pipeline/output.json" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + mode: "0644" + tags: + - install + - install:configuration + +- name: Store input database credentials for analytics pipeline + copy: + content: "{{ ANALYTICS_PIPELINE_INPUT_DATABASE | to_json }}" + dest: "{{ COMMON_CFG_DIR }}/edx-analytics-pipeline/input.json" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + mode: "0644" + tags: + - install + - install:configuration + +- name: luigi configuration directory created + file: + path: /etc/luigi + state: directory + mode: "0755" + tags: + - install + - install:configuration + +- name: luigi configuration file written + template: + src: client.cfg.j2 + dest: /etc/luigi/client.cfg + mode: "0644" + tags: + - install + - install:configuration + +- name: Util library source checked out + git: + repo: "{{ analytics_pipeline_util_library.repo }}" + dest: "{{ analytics_pipeline_util_library.path }}" + version: "{{ analytics_pipeline_util_library.version }}" + tags: + - install + - install:code + +- name: lib directory created + file: + path: "{{ HADOOP_COMMON_USER_HOME }}/lib" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: directory + tags: + - install + - install:app-requirements + +- name: Check if the util library needs to be built + stat: + path: "{{ HADOOP_COMMON_USER_HOME }}/lib/edx-analytics-hadoop-util.jar" + register: util_lib_built + tags: + - install + - install:app-requirements + +- name: Util library built + shell: > + {{ hadoop_common_java_home }}/bin/javac -cp `{{ HADOOP_COMMON_HOME }}/bin/hadoop classpath` org/edx/hadoop/input/ManifestTextInputFormat.java && + {{ hadoop_common_java_home }}/bin/jar cf {{ HADOOP_COMMON_USER_HOME }}/lib/edx-analytics-hadoop-util.jar org/edx/hadoop/input/ManifestTextInputFormat.class && + chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HADOOP_COMMON_USER_HOME }}/lib/edx-analytics-hadoop-util.jar + args: + chdir: "{{ analytics_pipeline_util_library.path }}" + when: not util_lib_built.stat.exists + tags: + - install + - install:app-requirements + +- name: reload systemd configuration + command: systemctl daemon-reload + tags: + - install + - install:configuration + +- name: enable Hadoop services + service: + name: "{{ hadoop_common_services }}" + enabled: yes + tags: + - install + - install:configuration + +- name: start Hadoop services + service: + name: "{{ hadoop_common_services }}" + state: started + tags: 
+ - manage + - manage:start + +- name: stop Hadoop services + service: + name: "{{ hadoop_common_services }}" + state: stopped + tags: + - manage:stop + +- name: restart Hadoop services + service: + name: "{{ hadoop_common_services }}" + state: restarted + tags: + - manage:start + - manage:restart + +- name: Ensure package dir exists in HDFS + shell: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh && hdfs dfs -mkdir -p /edx-analytics-pipeline/packages/" + become_user: "{{ hadoop_common_user }}" + tags: + - install + - install:app-requirements + +- name: Ensure util library is in HDFS + shell: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh && hdfs dfs -put -f {{ HADOOP_COMMON_USER_HOME }}/lib/edx-analytics-hadoop-util.jar /edx-analytics-pipeline/packages/" + become_user: "{{ hadoop_common_user }}" + tags: + - install + - install:app-requirements + register: libcp + until: libcp is succeeded + retries: 6 + delay: 10 + +- name: Ensure the data directory exists + shell: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh && hdfs dfs -mkdir -p {{ ANALYTICS_PIPELINE_HDFS_DATA_DIR }}" + become_user: "{{ hadoop_common_user }}" + tags: + - install + - install:base + +- name: Ensure tracking log file can be read + file: + path: "{{ COMMON_LOG_DIR }}/tracking/tracking.log" + mode: "0644" + ignore_errors: yes + tags: + - install + - install:configuration + +- name: Cron job syncs tracking log file to hdfs + cron: + user: "{{ hadoop_common_user }}" + name: "Sync tracking log to HDFS" + job: "{{ HADOOP_COMMON_HOME }}/bin/hdfs dfs -put -f {{ COMMON_LOG_DIR }}/tracking/tracking.log {{ ANALYTICS_PIPELINE_HDFS_DATA_DIR }}/tracking.log" + tags: + - install + - install:configuration + +- name: store configuration for acceptance tests + copy: + src: acceptance.json + dest: /var/tmp/acceptance.json + mode: "0644" + tags: + - install + - install:configuration + +- name: Grant access to table storing test data in output database + mysql_user: + user: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE.username }}" + password: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE.password }}" + priv: 'acceptance%.*:ALL' + append_privs: yes + tags: + - install + - install:configuration + +- name: Test if Hive metadata store schema exists + shell: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh && {{ HIVE_HOME }}/bin/hive | tr '\n' ' '" + become_user: "{{ hadoop_common_user }}" + register: hive_metastore_info + tags: + - install + - install:configuration + +- name: Initialize Hive metadata store schema + shell: ". 
{{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh && {{ HIVE_HOME }}/bin/schematool -dbType mysql -initSchema" + become_user: "{{ hadoop_common_user }}" + when: "'Version information not found in metastore' in hive_metastore_info.stderr" + tags: + - install + - install:configuration diff --git a/playbooks/roles/analytics_pipeline/templates/client.cfg.j2 b/playbooks/roles/analytics_pipeline/templates/client.cfg.j2 new file mode 100644 index 00000000000..1c288c0bdbf --- /dev/null +++ b/playbooks/roles/analytics_pipeline/templates/client.cfg.j2 @@ -0,0 +1,4 @@ +[hadoop] +version: {{ ANALYTICS_PIPELINE_LUIGI_HADOOP_VERSION }} +command: {{ ANALYTICS_PIPELINE_LUIGI_HADOOP_COMMAND }} +streaming-jar: {{ ANALYTICS_PIPELINE_LUIGI_HADOOP_STREAMING_JAR }} diff --git a/playbooks/roles/ansible-role-django-ida/tasks/main.yml b/playbooks/roles/ansible-role-django-ida/tasks/main.yml new file mode 100644 index 00000000000..671e6e39175 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/tasks/main.yml @@ -0,0 +1,59 @@ +--- +- name: Check if the role exists + stat: + path: "roles/{{ my_role_name }}" + register: role_exists + +- name: Prompt for overwrite + pause: prompt="Role {{ my_role_name }} exists. Overwrite? Touch any key to continue or -c, then a, to abort." + when: role_exists.stat.exists + +- name: Create docker directories + file: + path: '../docker/{{ item }}/{{ my_role_name|replace("_","-") }}' + state: directory + with_items: + - build + +- name: Create role directories + file: + path: "roles/{{ my_role_name }}/{{ item }}" + state: directory + with_items: + - tasks + - meta + - defaults + - templates/edx/app/supervisor/conf.d.available + - templates/edx/app/{{ my_role_name }} + - templates/edx/app/nginx/sites-available + +- name: Make an ansible role + template: + src: "{{ item }}/main.yml.j2" + dest: "roles/{{ my_role_name }}/{{ item }}/main.yml" + with_items: + - tasks + - meta + - defaults + +- name: Update docker templates + template: + src: "docker/{{ item.src }}" + dest: "../docker/{{ item.dest }}" + with_items: + - { src: 'build/ROLE_NAME/Dockerfile.j2', dest: 'build/{{ my_role_name|replace("_","-") }}/Dockerfile'} + - { src: 'build/ROLE_NAME/ansible_overrides.yml.j2', dest: 'build/{{ my_role_name|replace("_","-") }}/ansible_overrides.yml'} + - { src: 'build/ROLE_NAME/inventory', dest: 'build/{{ my_role_name|replace("_","-") }}/inventory'} + - { src: 'plays/ROLE_NAME.yml.j2', dest: 'plays/{{ my_role_name }}.yml'} + +- name: Update role templates + template: + src: "templates/templates/{{ item.src }}" + dest: "roles/{{ my_role_name }}/templates/{{ item.dest }}" + with_items: + - { src: 'edx/app/supervisor/conf.d.available/ROLE_NAME.conf.j2', dest: 'edx/app/supervisor/conf.d.available/{{ my_role_name }}.conf.j2'} + - { src: 'edx/app/ROLE_NAME/ROLE_NAME.sh.j2', dest: 'edx/app/{{ my_role_name }}/{{ my_role_name }}.sh.j2'} + - { src: 'edx/app/ROLE_NAME/ROLE_NAME_env.j2', dest: 'edx/app/{{ my_role_name }}/{{ my_role_name }}_env.j2'} + - { src: 'edx/app/ROLE_NAME/ROLE_NAME_gunicorn.py.j2', dest: 'edx/app/{{ my_role_name }}/{{ my_role_name }}_gunicorn.py.j2'} + - { src: 'edx/app/ROLE_NAME/devstack.sh.j2', dest: 'edx/app/{{ my_role_name }}/devstack.sh.j2'} + - { src: 'edx/app/nginx/sites-available/ROLE_NAME.j2', dest: 'edx/app/nginx/sites-available/{{ my_role_name }}.j2'} diff --git a/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 b/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 new file mode 100644 index 00000000000..5acc8e57c14 --- /dev/null +++ 
b/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 @@ -0,0 +1,147 @@ +--- +{% include 'roles/ansible-role/templates/header.j2' %} +# +# Defaults for role {{ role_name }} +# +{{ role_name|upper }}_GIT_IDENTITY: !!null + +# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC +# and a key being provided via NEWRELIC_LICENSE_KEY +{{ role_name|upper }}_NEWRELIC_APPNAME: "{{ '{{ COMMON_ENVIRONMENT }}' }}-{{ '{{ COMMON_DEPLOYMENT }}' }}-{{ '{{' }} {{ role_name }}_service_name }}" +{{ role_name|upper }}_PIP_EXTRA_ARGS: "-i {{ '{{ COMMON_PYPI_MIRROR_URL }}' }}" +{{ role_name|upper }}_NGINX_PORT: 18{{ port_suffix }} +{{ role_name|upper }}_SSL_NGINX_PORT: 48{{ port_suffix }} + +{{ role_name|upper }}_DEFAULT_DB_NAME: '{{ role_name }}' +{{ role_name|upper }}_MYSQL: 'localhost' +# MySQL usernames are limited to 16 characters +{{ role_name|upper }}_MYSQL_USER: '{{ role_name[:13] }}001' +{{ role_name|upper }}_MYSQL_PASSWORD: 'password' +{{ role_name|upper }}_MYSQL_OPTIONS: + connect_timeout: 10 + +{{ role_name|upper }}_DATABASES: + # rw user + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ '{{' }} {{ role_name|upper }}_DEFAULT_DB_NAME }}' + USER: '{{ '{{' }} {{ role_name|upper }}_MYSQL_USER }}' + PASSWORD: '{{ '{{' }} {{ role_name|upper }}_MYSQL_PASSWORD }}' + HOST: '{{ '{{' }} {{ role_name|upper}}_MYSQL }}' + PORT: '3306' + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 60 + OPTIONS: '{{ '{{' }} {{ role_name|upper}}_MYSQL_OPTIONS }}' + + +{{ role_name|upper }}_MEMCACHE: [ 'memcache' ] + +{{ role_name|upper }}_CACHES: + default: + BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' + KEY_PREFIX: '{{ '{{' }} {{ role_name }}_service_name }}' + LOCATION: '{{ '{{' }} {{ role_name|upper}}_MEMCACHE }}' + +{{ role_name|upper }}_VERSION: "master" +{{ role_name|upper }}_DJANGO_SETTINGS_MODULE: "{{ role_name }}.settings.production" +{{ role_name|upper }}_URL_ROOT: 'http://{{ role_name }}:18{{ port_suffix }}' +{{ role_name|upper }}_LOGOUT_URL: '{{ '{{' }} {{ role_name|upper }}_URL_ROOT }}/logout/' +{{ role_name|upper }}_OAUTH_URL_ROOT: '{{ EDXAPP_LMS_ISSUER | default("http://127.0.0.1:8000/oauth2") }}' +{{ role_name|upper }}_OAUTH2_LOGOUT_URL: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000/") }}/logout' + +{{ role_name|upper }}_SECRET_KEY: 'Your secret key here' +{{ role_name|upper }}_TIME_ZONE: 'UTC' +{{ role_name|upper }}_LANGUAGE_CODE: 'en-us' + +# Used to automatically configure OAuth2 Client +{{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_KEY : '{{ role_name|lower }}-sso-key' +{{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_SECRET : '{{ role_name|lower }}-sso-secret' +{{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_KEY : '{{ role_name|lower }}-backend-service-key' +{{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_SECRET : '{{ role_name|lower }}-backend-service-secret' +{{ role_name|upper }}_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +{{ role_name|upper }}_PLATFORM_NAME: 'Your Platform Name Here' + +{{ role_name|upper }}_SERVICE_CONFIG: + SECRET_KEY: '{{ '{{' }} {{ role_name|upper }}_SECRET_KEY }}' + TIME_ZONE: '{{ '{{' }} {{ role_name|upper }}_TIME_ZONE }}' + LANGUAGE_CODE: '{{ '{{' }} {{ role_name|upper }}_LANGUAGE_CODE }}' + + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ '{{' }} {{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + 
BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ '{{' }} {{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ '{{' }} {{ role_name|upper }}_OAUTH_URL_ROOT }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ '{{' }} {{ role_name|upper }}_OAUTH2_LOGOUT_URL }}' + SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' + + STATIC_ROOT: "{{ '{{' }} COMMON_DATA_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}/staticfiles" + # db config + DATABASES: '{{ '{{' }} {{ role_name|upper }}_DATABASES }}' + CACHES: '{{ '{{' }} {{ role_name|upper }}_CACHES }}' + + PLATFORM_NAME: '{{ '{{' }} {{ role_name|upper }}_PLATFORM_NAME }}' + + +{{ role_name|upper }}_REPOS: + - PROTOCOL: "{{ '{{' }} COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ '{{' }} COMMON_GIT_MIRROR }}" + PATH: "{{ '{{' }} COMMON_GIT_PATH }}" + REPO: {{ role_name|replace('_', '-') }}.git + VERSION: "{{ '{{' }} {{ role_name|upper }}_VERSION }}" + DESTINATION: "{{ '{{' }} {{ role_name }}_code_dir }}" + SSH_KEY: "{{ '{{' }} {{ role_name|upper }}_GIT_IDENTITY }}" + + +{{ role_name|upper }}_GUNICORN_WORKERS: "2" +{{ role_name|upper }}_GUNICORN_EXTRA: "" +{{ role_name|upper }}_GUNICORN_EXTRA_CONF: "" +{{ role_name|upper }}_GUNICORN_WORKER_CLASS: "gevent" +{{ role_name|upper }}_GUNICORN_MAX_REQUESTS: !!null + +{{ role_name|upper }}_HOSTNAME: '~^((stage|prod)-)?{{ role_name|replace('_', '-') }}.*' + +{{ role_name|upper }}_DEBIAN_EXTRA_PKGS: [] + +nginx_{{ role_name }}_gunicorn_hosts: + - 127.0.0.1 + +# +# vars are namespace with the module name. +# +{{ role_name }}_role_name: {{ role_name }} +{{ role_name|lower }}_venv_dir: "{{ '{{' }} {{ role_name|lower }}_home }}/venvs/{{ '{{' }} {{ role_name|lower }}_service_name }}" + +{{ role_name }}_environment: + DJANGO_SETTINGS_MODULE: "{{ '{{' }} {{ role_name|upper }}_DJANGO_SETTINGS_MODULE }}" + {{ role_name|upper }}_CFG: "{{ '{{' }} COMMON_CFG_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}.yml" + PATH: "{{ '{{' }} {{ role_name|lower }}_venv_dir }}/bin:{{ '{{' }} ansible_env.PATH }}" + +{{ role_name }}_migration_environment: + DJANGO_SETTINGS_MODULE: "{{ '{{' }} {{ role_name|upper }}_DJANGO_SETTINGS_MODULE }}" + {{ role_name|upper }}_CFG: "{{ '{{' }} COMMON_CFG_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}.yml" + PATH: "{{ '{{' }} {{ role_name|lower }}_venv_dir }}/bin:{{ '{{' }} ansible_env.PATH }}" + DB_MIGRATION_USER: "{{ '{{' }} COMMON_MYSQL_MIGRATE_USER }}" + DB_MIGRATION_PASS: "{{ '{{' }} COMMON_MYSQL_MIGRATE_PASS }}" + +{{ role_name }}_service_name: "{{ role_name }}" +{{ role_name }}_user: "{{ '{{' }} {{ role_name }}_service_name }}" +{{ role_name }}_home: "{{ '{{' }} COMMON_APP_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}" +{{ role_name }}_code_dir: "{{ '{{' }} {{ role_name }}_home }}/{{ '{{' }} {{ role_name }}_service_name }}" + +{{ role_name }}_gunicorn_host: "127.0.0.1" +{{ role_name }}_gunicorn_port: 8{{ port_suffix }} +{{ role_name }}_gunicorn_timeout: 300 + +{{ role_name }}_log_dir: "{{ '{{' }} COMMON_LOG_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}" + +# +# OS packages +# + +{{ role_name }}_debian_pkgs: + - libmysqlclient-dev + - libssl-dev + - pkg-config + +{{ role_name }}_redhat_pkgs: [] diff --git a/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 new file mode 100644 index 00000000000..1b0103a8b40 --- /dev/null +++ 
b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 @@ -0,0 +1,30 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/{{ role_name|replace("_","-") }}/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +FROM edxops/xenial-common:latest +MAINTAINER edxops + +ARG {{ role_name|upper }}_VERSION=master +ARG REPO_OWNER=edx + +ADD . /edx/app/edx_ansible/edx_ansible + +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/{{ role_name|replace("_","-") }}/ansible_overrides.yml / +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook {{ role_name }}.yml \ + -c local -i '127.0.0.1,' \ + -t 'install,assets,devstack:install' \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="{{ role_name|upper }}_VERSION=${{ role_name|upper }}_VERSION" \ + --extra-vars="COMMON_GIT_PATH=$REPO_OWNER" + +USER root +ENTRYPOINT ["/edx/app/edxapp/devstack.sh"] +CMD ["start"] diff --git a/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/ansible_overrides.yml.j2 b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/ansible_overrides.yml.j2 new file mode 100644 index 00000000000..7a5ce0a5dca --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/ansible_overrides.yml.j2 @@ -0,0 +1,6 @@ +--- +{{ role_name }}_gunicorn_host: 127.0.0.1 +{{ role_name|upper }}_MYSQL: 'db' +{{ role_name|upper }}_DJANGO_SETTINGS_MODULE: '{{ role_name }}.settings.devstack' +{{ role_name|upper }}_MYSQL_MATCHER: '%' + diff --git a/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/inventory b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/inventory new file mode 100644 index 00000000000..8bb7ba6b33a --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/inventory @@ -0,0 +1,2 @@ +[local] +localhost diff --git a/playbooks/roles/ansible-role-django-ida/templates/docker/plays/ROLE_NAME.yml.j2 b/playbooks/roles/ansible-role-django-ida/templates/docker/plays/ROLE_NAME.yml.j2 new file mode 100644 index 00000000000..b927aebac9a --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/docker/plays/ROLE_NAME.yml.j2 @@ -0,0 +1,12 @@ +- name: Deploy {{ role_name|replace('_', ' ')|title }} + hosts: all + sudo: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ '{{' }} serial_count }}" + roles: + - nginx + - role: {{ role_name }} + nginx_default_sites: + - {{ role_name }} \ No newline at end of file diff --git a/playbooks/roles/ansible-role-django-ida/templates/header.j2 b/playbooks/roles/ansible-role-django-ida/templates/header.j2 new file mode 100644 index 00000000000..de44282a38b --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/header.j2 @@ -0,0 +1,9 @@ +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# diff --git a/playbooks/roles/ansible-role-django-ida/templates/meta/main.yml.j2 b/playbooks/roles/ansible-role-django-ida/templates/meta/main.yml.j2 new file mode 100644 index 00000000000..9e00c503473 --- /dev/null +++ 
b/playbooks/roles/ansible-role-django-ida/templates/meta/main.yml.j2 @@ -0,0 +1,25 @@ +--- +{% include 'roles/ansible-role/templates/header.j2' %} +# +# Role includes for role {{ role_name }} +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } +dependencies: + - common + - supervisor + - role: edx_service + edx_service_name: "{{ '{{' }} {{ role_name }}_service_name }}" + edx_service_config: "{{ '{{' }} {{ role_name|upper }}_SERVICE_CONFIG }}" + edx_service_repos: "{{ '{{' }} {{ role_name|upper }}_REPOS }}" + edx_service_user: "{{ '{{' }} {{ role_name }}_user }}" + edx_service_home: "{{ '{{' }} {{ role_name }}_home }}" + edx_service_packages: + debian: "{{ '{{' }} {{ role_name }}_debian_pkgs + {{ role_name|upper }}_DEBIAN_EXTRA_PKGS }}" + redhat: "{{ '{{' }} {{ role_name }}_redhat_pkgs }}" diff --git a/playbooks/roles/ansible-role-django-ida/templates/tasks/main.yml.j2 b/playbooks/roles/ansible-role-django-ida/templates/tasks/main.yml.j2 new file mode 100644 index 00000000000..47cd89a7b43 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/tasks/main.yml.j2 @@ -0,0 +1,186 @@ +--- +{% include 'roles/ansible-role/templates/header.j2' %} + +# +# Tasks for role {{ role_name }} +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: add gunicorn configuration file + template: + src: edx/app/{{ role_name }}/{{ role_name }}_gunicorn.py.j2 + dest: "{{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_gunicorn.py" + sudo_user: "{{ '{{' }} {{ role_name }}_user }}" + tags: + - install + - install:configuration + +- name: build virtualenv + command: "virtualenv {{ '{{' }} {{ role_name }}_venv_dir }}" + args: + creates: "{{ '{{' }} {{ role_name }}_venv_dir }}/bin/pip" + sudo_user: "{{ '{{' }} {{ role_name }}_user }}" + environment: "{{ '{{' }} {{ role_name }}_environment }}" + tags: + - install + - install:app-requirements + +- name: install application requirements + command: make requirements + args: + chdir: "{{ '{{' }} {{ role_name }}_code_dir }}" + sudo_user: "{{ '{{' }} {{ role_name }}_user }}" + environment: "{{ '{{' }} {{ role_name }}_environment }}" + tags: + - install + - install:app-requirements + +- name: install development requirements + command: make local-requirements + args: + chdir: "{{ '{{' }} {{ role_name }}_code_dir }}" + sudo_user: "{{ '{{' }} {{ role_name }}_user }}" + environment: "{{ '{{' }} {{ role_name }}_environment }}" + tags: + - devstack + - devstack:install + +- name: migrate database + command: make migrate + args: + chdir: "{{ '{{' }} {{ role_name }}_code_dir }}" + sudo_user: "{{ '{{' }} {{ role_name }}_user }}" + environment: "{{ '{{' }} {{ role_name }}_migration_environment }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: write out the supervisor wrapper + template: + src: "edx/app/{{ role_name }}/{{ role_name }}.sh.j2" + dest: "{{ '{{' }} {{ role_name }}_home }}/{{ '{{' }} {{ role_name }}_service_name }}.sh" + mode: 0650 + owner: "{{ '{{' }} supervisor_user }}" + group: "{{ '{{' }} common_web_user }}" + tags: + - install + - install:configuration + +- name: write supervisord config + template: + src: "edx/app/supervisor/conf.d.available/{{ role_name }}.conf.j2" + dest: "{{ '{{' }} supervisor_available_dir }}/{{ '{{' }} {{ role_name }}_service_name }}.conf" + owner: "{{ '{{' }} supervisor_user }}" + group: "{{ '{{' }} common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + 
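+# Note: the doubly-escaped variable markers used throughout this template
+# render to plain Jinja expressions in the generated role, so the generated
+# tasks resolve them at their own run time rather than at generation time.
+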
+- name: write devstack script + template: + src: "edx/app/{{ role_name }}/devstack.sh.j2" + dest: "{{ '{{' }} {{ role_name }}_home }}/devstack.sh" + owner: "{{ '{{' }} supervisor_user }}" + group: "{{ '{{' }} common_web_user }}" + mode: 0744 + tags: + - devstack + - devstack:install + +- name: setup the {{ role_name }} env file + template: + src: "./{{ '{{' }} {{ role_name }}_home }}/{{ '{{' }} {{ role_name }}_service_name }}_env.j2" + dest: "{{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_env" + owner: "{{ '{{' }} {{ role_name }}_user }}" + group: "{{ '{{' }} {{ role_name }}_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: enable supervisor script + file: + src: "{{ '{{' }} supervisor_available_dir }}/{{ '{{' }} {{ role_name }}_service_name }}.conf" + dest: "{{ '{{' }} supervisor_cfg_dir }}/{{ '{{' }} {{ role_name }}_service_name }}.conf" + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: update supervisor configuration + command: "{{ '{{' }} supervisor_ctl }} -c {{ '{{' }} supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: create symlinks from the repo dir + file: + src: "{{ '{{' }} {{ role_name }}_code_dir }}/{{ '{{' }} item }}" + dest: "{{ '{{' }} COMMON_BIN_DIR }}/{{ '{{' }} item.split('.')[0] }}.{{ role_name }}" + state: link + with_items: + - manage.py + tags: + - install + - install:app-requirements + +- name: run collectstatic + command: make static + args: + chdir: "{{ '{{' }} {{ role_name }}_code_dir }}" + sudo_user: "{{ '{{' }} {{ role_name }}_user }}" + environment: "{{ '{{' }} {{ role_name }}_environment }}" + tags: + - assets + - assets:gather + +- name: restart the application + supervisorctl: + state: restarted + supervisorctl_path: "{{ '{{' }} supervisor_ctl }}" + config: "{{ '{{' }} supervisor_cfg }}" + name: "{{ '{{' }} {{ role_name }}_service_name }}" + when: not disable_edx_services + sudo_user: "{{ '{{' }} supervisor_service_user }}" + tags: + - manage + - manage:start + +- name: Copying nginx configs for {{ role_name }} + template: + src: "edx/app/nginx/sites-available/{{ role_name }}.j2" + dest: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}" + owner: root + group: "{{ '{{' }} common_web_user }}" + mode: 0640 + notify: reload nginx + tags: + - install + - install:vhosts + +- name: Creating nginx config links for {{ role_name }} + file: + src: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}" + dest: "{{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}" + state: link + owner: root + group: root + notify: reload nginx + tags: + - install + - install:vhosts + diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 new file mode 100644 index 00000000000..0bcbab51303 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# {{ '{{' }} ansible_managed }} + +{{ '{%' }} set {{ role_name }}_venv_bin = {{ role_name }}_home + "/venvs/" + {{ role_name }}_service_name + "/bin" %} +{{ '{%' }} if COMMON_ENABLE_NEWRELIC_APP %} +{{ '{%' }} set executable = {{ role_name }}_venv_bin + '/newrelic-admin run-program ' + {{ role_name }}_venv_bin + '/gunicorn' %} +{{ '{%' }} else %} +{{ '{%' }} set executable = {{ role_name }}_venv_bin + '/gunicorn' %} +{{ '{%' }} endif %} + 
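+# When COMMON_ENABLE_NEWRELIC_APP is true, the executable selected above wraps
+# gunicorn with newrelic-admin; the exports below hand the agent its app name
+# and license key.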
+{{ '{%' }} if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_APP_NAME="{{ '{{' }} {{ role_name|upper }}_NEWRELIC_APPNAME }}" +export NEW_RELIC_LICENSE_KEY="{{ '{{' }} NEWRELIC_LICENSE_KEY }}" +{{ '{%' }} endif -%} + +source {{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_env +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ '{{' }} executable }} -c {{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_gunicorn.py {{ '{{' }} {{ role_name|upper }}_GUNICORN_EXTRA }} {{ role_name }}.wsgi:application diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME_env.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME_env.j2 new file mode 100644 index 00000000000..7998189f1f8 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME_env.j2 @@ -0,0 +1,7 @@ +# {{ '{{' }} ansible_managed }} + +{{ '{%' }} for name,value in {{ role_name }}_environment.items() -%} +{{ '{%' }}- if value -%} +export {{ '{{' }} name }}="{{ '{{' }} value }}" +{{ '{%' }} endif %} +{{ '{%' }}- endfor %} diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME_gunicorn.py.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME_gunicorn.py.j2 new file mode 100644 index 00000000000..fb1ac4ee7a3 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME_gunicorn.py.j2 @@ -0,0 +1,16 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html +{{ '{{' }} ansible_managed }} +""" + +timeout = {{ '{{' }} {{ role_name }}_gunicorn_timeout }} +bind = "{{ '{{' }} {{ role_name }}_gunicorn_host }}:{{ '{{' }} {{ role_name }}_gunicorn_port }}" +pythonpath = "{{ '{{' }} {{ role_name }}_code_dir }}" +workers = {{ '{{' }} {{ role_name|upper }}_GUNICORN_WORKERS }} +worker_class = "{{ '{{' }} {{ role_name|upper }}_GUNICORN_WORKER_CLASS }}" + +{{ '{%' }} if {{ role_name|upper }}_GUNICORN_MAX_REQUESTS {{ '%}' }} +max_requests = {{ '{{' }} {{ role_name|upper }}_GUNICORN_MAX_REQUESTS }} +{{ '{%' }} endif {{ '%}' }} + +{{ '{{' }} {{ role_name|upper }}_GUNICORN_EXTRA_CONF }} diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/devstack.sh.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/devstack.sh.j2 new file mode 100644 index 00000000000..5b5e1b12453 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/devstack.sh.j2 @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# {{ '{{' }} ansible_managed }} + +source {{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_env +COMMAND=$1 + +case $COMMAND in + start) + {{ '{%' }} set {{ role_name }}_venv_bin = {{ role_name }}_home + "/venvs/" + {{ role_name }}_service_name + "/bin" %} + {{ '{%' }} set executable = {{ role_name }}_venv_bin + '/gunicorn' %} + + {{ '{{' }} supervisor_venv_bin }}/supervisord --configuration {{ '{{' }} supervisor_cfg }} + + cd /edx/app/edx_ansible/edx_ansible/docker/plays + /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook {{ role_name }}.yml -c local -i '127.0.0.1,' \ + -t 'install:app-requirements,assets:gather,devstack,migrate,manage:start' \ + --extra-vars="migrate_db=yes" \ + --extra-vars="@/ansible_overrides.yml" + + # Docker requires an active foreground task. Tail the logs to appease Docker and + # provide useful output for development.
+ cd {{ '{{' }} supervisor_log_dir }} + tail -f {{ '{{' }} {{ role_name }}_service_name }}-stderr.log -f {{ '{{' }} {{ role_name }}_service_name }}-stdout.log + ;; + open) + cd {{ '{{' }} {{ role_name }}_code_dir }}/ + . {{ '{{' }} {{ role_name }}_venv_bin }}/activate + /bin/bash + ;; +esac diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/nginx/sites-available/ROLE_NAME.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/nginx/sites-available/ROLE_NAME.j2 new file mode 100644 index 00000000000..74639aedae0 --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/nginx/sites-available/ROLE_NAME.j2 @@ -0,0 +1,95 @@ +# +# {{ '{{' }} ansible_managed }} +# + + +{{ '{%' }} if nginx_default_sites is defined and "{{ role_name }}" in nginx_default_sites {{ '%}' }} + {{ '{%' }} set default_site = "default_server" {{ '%}' }} +{{ '{%' }} else {{ '%}' }} + {{ '{%' }} set default_site = "" {{ '%}' }} +{{ '{%' }} endif {{ '%}' }} + +upstream {{ role_name }}_app_server { +{{ '{%' }} for host in nginx_{{ role_name }}_gunicorn_hosts {{ '%}' }} + server {{ '{{' }} host }}:{{ '{{' }} {{ role_name }}_gunicorn_port }} fail_timeout=0; +{{ '{%' }} endfor {{ '%}' }} +} + +server { + server_name {{ '{{' }} {{ role_name|upper }}_HOSTNAME }}; + + {{ '{%' }} if NGINX_ENABLE_SSL {{ '%}' }} + + listen {{ '{{' }} {{ role_name|upper }}_NGINX_PORT }} {{ '{{' }} default_site }}; + listen {{ '{{' }} {{ role_name|upper }}_SSL_NGINX_PORT }} ssl; + + ssl_certificate /etc/ssl/certs/{{ '{{' }} NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ '{{' }} NGINX_SSL_KEY|basename }}; + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + {{ '{%' }} else {{ '%}' }} + listen {{ '{{' }} {{ role_name|upper }}_NGINX_PORT }} {{ '{{' }} default_site }}; + {{ '{%' }} endif {{ '%}' }} + + location ~ ^/static/(?P<file>.*) { + root {{ '{{' }} COMMON_DATA_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}; + try_files /staticfiles/$file =404; + } + + location / { + try_files $uri @proxy_to_app; + } + + {{ '{%' }} if NGINX_ROBOT_RULES|length > 0 {{ '%}' }} + location /robots.txt { + root {{ '{{' }} nginx_app_dir }}; + try_files $uri /robots.txt =404; + } + {{ '{%' }} endif {{ '%}' }} + + location @proxy_to_app { + {{ '{%' }} if NGINX_SET_X_FORWARDED_HEADERS {{ '%}' }} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; + {{ '{%' }} else {{ '%}' }} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + {{ '{%' }} endif {{ '%}' }} + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://{{ role_name }}_app_server; + } + +# Prevent invalid display of courseware in IE 10+ with high privacy settings + add_header P3P {{ '{{' }} NGINX_P3P_MESSAGE {{ '}}' }}; + + # Nginx does not support nested conditions or "or" conditions, so + # there is an unfortunate mix of conditionals here.
+ {{ '{%' }} if NGINX_REDIRECT_TO_HTTPS {{ '%}' }} + {{ '{%' }} if NGINX_HTTPS_REDIRECT_STRATEGY == "scheme" {{ '%}' }} + # Redirect http to https over single instance + if ($scheme != "https") + { + set $do_redirect_to_https "true"; + } + + {{ '{%' }} elif NGINX_HTTPS_REDIRECT_STRATEGY == "forward_for_proto" {{ '%}' }} + + # Forward to HTTPS if we're an HTTP request... and the server is behind ELB + if ($http_x_forwarded_proto = "http") + { + set $do_redirect_to_https "true"; + } + {{ '{%' }} endif {{ '%}' }} + + # Execute the actual redirect + if ($do_redirect_to_https = "true") + { + return 301 https://$host$request_uri; + } + {{ '{%' }} endif {{ '%}' }} +} diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/supervisor/conf.d.available/ROLE_NAME.conf.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/supervisor/conf.d.available/ROLE_NAME.conf.j2 new file mode 100644 index 00000000000..29dfa34f20d --- /dev/null +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/supervisor/conf.d.available/ROLE_NAME.conf.j2 @@ -0,0 +1,12 @@ +# +# {{ '{{' }} ansible_managed }} +# +[program:{{ '{{' }} {{ role_name }}_service_name }}] + +command={{ '{{' }} {{ role_name }}_home }}/{{ '{{' }} {{ role_name }}_service_name }}.sh +user={{ '{{' }} common_web_user }} +directory={{ '{{' }} {{ role_name }}_code_dir }} +stdout_logfile={{ '{{' }} supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ '{{' }} supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/ansible-role/tasks/main.yml b/playbooks/roles/ansible-role/tasks/main.yml index ba2a7f5e37f..554e5660324 100644 --- a/playbooks/roles/ansible-role/tasks/main.yml +++ b/playbooks/roles/ansible-role/tasks/main.yml @@ -1,28 +1,29 @@ ---- - -- name: check if the role exists - command: test -d roles/{{ role_name }} +--- +- name: Check if the role exists + command: "test -d roles/{{ my_role_name }}" register: role_exists ignore_errors: yes -- name: prompt for overwrite - pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or -c, then a, to abort." - when: role_exists | success +- name: Prompt for overwrite + pause: prompt="Role {{ my_role_name }} exists. Overwrite? Touch any key to continue or -c, then a, to abort." + when: role_exists is succeeded -- name: create role directories - file: path=roles/{{role_name}}/{{ item }} state=directory +- name: Create role directories + file: + path: "roles/{{ my_role_name }}/{{ item }}" + state: directory with_items: - tasks - meta - - handlers - defaults - templates - files -- name: make an ansible role - template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml +- name: Make an ansible role + template: + src: "{{ item }}/main.yml.j2" + dest: "roles/{{ my_role_name }}/{{ item }}/main.yml" with_items: - tasks - meta - defaults - - handlers diff --git a/playbooks/roles/ansible-role/templates/defaults/main.yml.j2 b/playbooks/roles/ansible-role/templates/defaults/main.yml.j2 index 800c37c31d4..f9bc37b2926 100644 --- a/playbooks/roles/ansible-role/templates/defaults/main.yml.j2 +++ b/playbooks/roles/ansible-role/templates/defaults/main.yml.j2 @@ -1,18 +1,18 @@ --- {% include 'roles/ansible-role/templates/header.j2' %} # -# Defaults for role {{ role_name }} +# Defaults for role {{ my_role_name }} # # -# vars are namespace with the module name. +# vars are namespaced with the module name. 
# -{{ role_name }}_role_name: {{ role_name }} +{{ my_role_name }}_role_name: {{ my_role_name }} # # OS packages # -{{ role_name }}_debian_pkgs: [] +{{ my_role_name }}_debian_pkgs: [] -{{ role_name }}_redhat_pkgs: [] +{{ my_role_name }}_redhat_pkgs: [] diff --git a/playbooks/roles/ansible-role/templates/handlers/main.yml.j2 b/playbooks/roles/ansible-role/templates/handlers/main.yml.j2 deleted file mode 100644 index 9779654d79d..00000000000 --- a/playbooks/roles/ansible-role/templates/handlers/main.yml.j2 +++ /dev/null @@ -1,11 +0,0 @@ ---- -{% include 'roles/ansible-role/templates/header.j2' %} - -# -# Handlers for role {{ role_name }} -# -# Overview: -# -# -- name: notify me - debug: msg="stub handler" diff --git a/playbooks/roles/ansible-role/templates/header.j2 b/playbooks/roles/ansible-role/templates/header.j2 index 635086179ce..de44282a38b 100644 --- a/playbooks/roles/ansible-role/templates/header.j2 +++ b/playbooks/roles/ansible-role/templates/header.j2 @@ -1,9 +1,9 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # diff --git a/playbooks/roles/ansible-role/templates/meta/main.yml.j2 b/playbooks/roles/ansible-role/templates/meta/main.yml.j2 index f7c1496a1e3..33d3e7df97c 100644 --- a/playbooks/roles/ansible-role/templates/meta/main.yml.j2 +++ b/playbooks/roles/ansible-role/templates/meta/main.yml.j2 @@ -1,7 +1,7 @@ --- {% include 'roles/ansible-role/templates/header.j2' %} # -# Role includes for role {{ role_name }} +# Role includes for role {{ my_role_name }} # # Example: # diff --git a/playbooks/roles/ansible-role/templates/tasks/main.yml.j2 b/playbooks/roles/ansible-role/templates/tasks/main.yml.j2 index cc12457a465..12534751d77 100644 --- a/playbooks/roles/ansible-role/templates/tasks/main.yml.j2 +++ b/playbooks/roles/ansible-role/templates/tasks/main.yml.j2 @@ -2,7 +2,7 @@ {% include 'roles/ansible-role/templates/header.j2' %} # -# Tasks for role {{ role_name }} +# Tasks for role {{ my_role_name }} # # Overview: # diff --git a/playbooks/roles/ansible_debug/tasks/main.yml b/playbooks/roles/ansible_debug/tasks/main.yml index adb4728cc74..a2822c46589 100644 --- a/playbooks/roles/ansible_debug/tasks/main.yml +++ b/playbooks/roles/ansible_debug/tasks/main.yml @@ -1,50 +1,55 @@ --- - name: Dump all vars to json - template: src=dumpall.json.j2 dest=/tmp/ansible.all.json mode=0600 + template: + src: dumpall.json.j2 + dest: /tmp/ansible.all.json + mode: 0600 tags: - - dumpall - - debug + - dumpall + - debug - name: Dump lms auth|env file - template: src=../../edxapp/templates/lms.{{item}}.json.j2 dest=/tmp/lms.{{item}}.json mode=0600 + template: + src: "../../edxapp/templates/lms.{{ item }}.json.j2" + dest: "/tmp/lms.{{ item }}.json" + mode: 0600 with_items: - env - auth when: "'lms' in service_variants_enabled" tags: - - dumpall - - debug - -- name: Dump lms-preview auth|env file - template: src=../../edxapp/templates/lms-preview.{{item}}.json.j2 dest=/tmp/lms-preview.{{item}}.json mode=0600 - with_items: - - env - - auth - when: "'lms-preview' in service_variants_enabled" - tags: 
- - dumpall - - debug + - dumpall + - debug - name: Dump cms auth|env file - template: src=../../edxapp/templates/cms.{{item}}.json.j2 dest=/tmp/cms.{{item}}.json mode=0600 + template: + src: "../../edxapp/templates/cms.{{ item }}.json.j2" + dest: "/tmp/cms.{{ item }}.json" + mode: 0600 with_items: - env - auth when: "'cms' in service_variants_enabled" tags: - - dumpall - - debug + - dumpall + - debug - name: Dump all vars to yaml - template: src=dumpall.yml.j2 dest=/tmp/ansible.all.yml mode=0600 + template: + src: dumpall.yml.j2 + dest: /tmp/ansible.all.yml + mode: 0600 tags: - - dumpall - - debug + - dumpall + - debug - name: fetch remote files # fetch is fail-safe for remote files that don't exist # setting mode is not an option - fetch: src=/tmp/{{item}} dest=/tmp/{{ansible_hostname}}-{{item}} flat=True + fetch: + src: "/tmp/{{ item }}" + dest: "/tmp/{{ ansible_hostname }}-{{item}}" + flat: True with_items: - ansible.all.json - ansible.all.yml @@ -55,5 +60,5 @@ - cms.env.json - cms.auth.json tags: - - dumpall - - debug + - dumpall + - debug diff --git a/playbooks/roles/antivirus/defaults/main.yml b/playbooks/roles/antivirus/defaults/main.yml new file mode 100644 index 00000000000..a5e7f71f583 --- /dev/null +++ b/playbooks/roles/antivirus/defaults/main.yml @@ -0,0 +1,45 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role antivirus +# + +# +# vars are namespace with the module name. +# +antivirus_role_name: antivirus + +# +# OS packages +# + +antivirus_debian_pkgs: [clamav] +antivirus_redhat_pkgs: [] +antivirus_pip_pkgs: [] + +antivirus_app_dir: "{{ COMMON_APP_DIR }}/antivirus" +antivirus_log_dir: "{{ COMMON_LOG_DIR }}/antivirus" +antivirus_user: "antivirus" + +ANTIVIRUS_S3_BUCKETS_SCAN: false +ANTIVIRUS_BUCKETS: !!null +ANTIVIRUS_MAILTO: "{{ EDXAPP_TECH_SUPPORT_EMAIL }}" +ANTIVIRUS_MAILFROM: "{{ EDXAPP_DEFAULT_FROM_EMAIL }}" +ANTIVIRUS_AWS_KEY: "" +ANTIVIRUS_AWS_SECRET: "" +ANTIVIRUS_S3_AWS_KEY: "{{ ANTIVIRUS_AWS_KEY }}" +ANTIVIRUS_SES_AWS_KEY: "{{ ANTIVIRUS_AWS_KEY }}" +ANTIVIRUS_S3_AWS_SECRET: "{{ ANTIVIRUS_AWS_SECRET}}" +ANTIVIRUS_SES_AWS_SECRET: "{{ ANTIVIRUS_AWS_SECRET}}" +ANTIVIRUS_SNITCH_URL: !!null + + +ANTIVIRUS_SERVER_SCAN: true +ANTIVIRUS_SCAN_DIRECTORY: "" diff --git a/playbooks/roles/antivirus/handlers/main.yml b/playbooks/roles/antivirus/handlers/main.yml new file mode 100644 index 00000000000..978a96d5037 --- /dev/null +++ b/playbooks/roles/antivirus/handlers/main.yml @@ -0,0 +1,16 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Handlers for role antivirus +# +# Overview: +# +# diff --git a/playbooks/roles/antivirus/meta/main.yml b/playbooks/roles/antivirus/meta/main.yml new file mode 100644 index 00000000000..72c3ee8e7d7 --- /dev/null +++ b/playbooks/roles/antivirus/meta/main.yml @@ -0,0 +1,14 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: 
https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role antivirus +# +dependencies: + - common diff --git a/playbooks/roles/antivirus/tasks/main.yml b/playbooks/roles/antivirus/tasks/main.yml new file mode 100644 index 00000000000..f44bc10231f --- /dev/null +++ b/playbooks/roles/antivirus/tasks/main.yml @@ -0,0 +1,97 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role antivirus +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: install antivirus system packages + apt: pkg={{ item }} install_recommends=yes state=present + with_items: "{{ antivirus_debian_pkgs }}" + +- name: create antivirus scanner user + user: + name: "{{ antivirus_user }}" + home: "{{ antivirus_app_dir }}" + createhome: no + shell: /bin/false + +- name: create antivirus edx directories + file: + path: "{{ item }}" + state: directory + owner: "{{ antivirus_user }}" + group: "{{ antivirus_user }}" + with_items: + - "{{ antivirus_app_dir }}" + - "{{ antivirus_app_dir }}/data" + - "{{ antivirus_log_dir }}" + when: ANTIVIRUS_S3_BUCKETS_SCAN or ANTIVIRUS_SERVER_SCAN + +- name: Ensure the clamav.log file exists + file: + path: "{{ antivirus_log_dir }}/clamav.log" + state: touch + owner: "{{ antivirus_user }}" + group: "{{ antivirus_user }}" + mode: "0644" + when: ANTIVIRUS_S3_BUCKETS_SCAN or ANTIVIRUS_SERVER_SCAN + +- name: install antivirus s3 scanner script + template: + src: s3_bucket_virus_scan.sh.j2 + dest: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh" + mode: "0555" + owner: "{{ antivirus_user }}" + group: "{{ antivirus_user }}" + when: ANTIVIRUS_S3_BUCKETS_SCAN + +- name: install antivirus s3 scanner cronjob + cron: + name: "antivirus-{{ item }}" + job: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'" + backup: yes + cron_file: "antivirus-{{ item }}" + user: "{{ antivirus_user }}" + hour: "*" + minute: "0" + day: "*" + with_items: "{{ ANTIVIRUS_BUCKETS }}" + when: ANTIVIRUS_S3_BUCKETS_SCAN + +- name: install antivirus server scanner script + template: + src: server_virus_scan.sh.j2 + dest: "{{ antivirus_app_dir }}/server_virus_scan.sh" + mode: "0555" + owner: "{{ antivirus_user }}" + group: "{{ antivirus_user }}" + when: ANTIVIRUS_SERVER_SCAN + +- name: install antivirus server scanner cronjob + cron: + name: "antivirus" + job: "{{ antivirus_app_dir }}/server_virus_scan.sh" + backup: yes + cron_file: "antivirus" + user: "{{ antivirus_user }}" + hour: "*" + minute: "0" + day: "*" + when: ANTIVIRUS_SERVER_SCAN diff --git a/playbooks/roles/antivirus/templates/s3_bucket_virus_scan.sh.j2 b/playbooks/roles/antivirus/templates/s3_bucket_virus_scan.sh.j2 new file mode 100644 index 00000000000..afc1f4082ee --- /dev/null +++ b/playbooks/roles/antivirus/templates/s3_bucket_virus_scan.sh.j2 @@ -0,0 +1,80 @@ +#! 
/bin/bash + +DEBUG="false" +BUCKETNAME="none" +MAILTO="" +MAILFROM="" +ANTIVIRUS_S3_AWS_KEY="{{ ANTIVIRUS_S3_AWS_KEY }}" +ANTIVIRUS_SES_AWS_KEY="{{ ANTIVIRUS_SES_AWS_KEY }}" +ANTIVIRUS_S3_AWS_SECRET="{{ ANTIVIRUS_S3_AWS_SECRET}}" +ANTIVIRUS_SES_AWS_SECRET="{{ ANTIVIRUS_SES_AWS_SECRET}}" +AWS_DEFAULT_REGION="{{ aws_region }}" + + +function usage { + echo "$0 - $VERSION"; + echo "Run ClamAV against the contents of an S3 Bucket."; + echo "Usage: $0 [options]"; + echo "options:"; + echo " -d Debug mode"; + echo " -h Usage (this screen)"; + echo " -b <bucketname>"; + echo " -m <mailto>"; + echo " -f <mailfrom>"; + echo " -k <aws access key>"; + echo " -s <aws secret key>" + +} + +while getopts "dhb:m:f:k:s:" optionName; do + case "$optionName" in + d) + DEBUG="true" + ;; + h) + usage; + exit; + ;; + [?]) + usage; + exit; + ;; + b) + BUCKETNAME=$OPTARG; + ;; + m) + MAILTO=$OPTARG; + ;; + f) + MAILFROM=$OPTARG; + ;; + k) + AWS_ACCESS_KEY_ID=$OPTARG; + ANTIVIRUS_S3_AWS_KEY=$OPTARG; + ANTIVIRUS_SES_AWS_KEY=$OPTARG; + ;; + s) + AWS_SECRET_ACCESS_KEY=$OPTARG; + ANTIVIRUS_S3_AWS_SECRET=$OPTARG; + ANTIVIRUS_SES_AWS_SECRET=$OPTARG; + ;; + esac +done + +cd {{ antivirus_app_dir }} + +export AWS_ACCESS_KEY_ID=$ANTIVIRUS_S3_AWS_KEY +export AWS_SECRET_ACCESS_KEY=$ANTIVIRUS_S3_AWS_SECRET +export AWS_DEFAULT_REGION + +mkdir -p data/$BUCKETNAME +aws s3 sync s3://$BUCKETNAME/ data/$BUCKETNAME +CLAMOUT=$(clamscan -ri data/$BUCKETNAME); +if [[ $? -ne 0 ]]; then + export AWS_ACCESS_KEY_ID=$ANTIVIRUS_SES_AWS_KEY + export AWS_SECRET_ACCESS_KEY=$ANTIVIRUS_SES_AWS_SECRET + aws ses send-email --to $MAILTO --from $MAILFROM --subject "Virus Scanner malicious file on $BUCKETNAME" --text "$CLAMOUT" +fi +{% if ANTIVIRUS_SNITCH_URL is defined %} +curl {{ ANTIVIRUS_SNITCH_URL }} +{% endif %} diff --git a/playbooks/roles/antivirus/templates/server_virus_scan.sh.j2 b/playbooks/roles/antivirus/templates/server_virus_scan.sh.j2 new file mode 100644 index 00000000000..ce113ecdf4c --- /dev/null +++ b/playbooks/roles/antivirus/templates/server_virus_scan.sh.j2 @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +{% if ANTIVIRUS_SCAN_DIRECTORY is defined %} +dir_to_scan="{{ ANTIVIRUS_SCAN_DIRECTORY }}" +{% endif %} + +log_dir={{ antivirus_log_dir }} + +clamout=$(clamscan -ri $dir_to_scan); +malware=$(echo "$clamout" | grep -i 'Infected' | cut -d ' ' -f3) + +if [[ "$malware" -ne 0 ]]; then + echo -e "Malware Found\n$clamout" >> "$log_dir/clamav.log" +fi diff --git a/playbooks/roles/apache/defaults/main.yml b/playbooks/roles/apache/defaults/main.yml index 80e67625926..9ec7de0439e 100644 --- a/playbooks/roles/apache/defaults/main.yml +++ b/playbooks/roles/apache/defaults/main.yml @@ -1,2 +1,6 @@ --- -apache_port: 80 +apache_ports: + - 80 +apache_sites: + - lms +apache_template_dir: '.'
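+
+# Illustrative override (hypothetical values): each entry in apache_sites must
+# have a matching <site>.j2 template (lms.j2 ships with this role), and every
+# port listed in apache_ports gets a NameVirtualHost/Listen pair in ports.conf.
+#
+# apache_ports:
+#   - 80
+#   - 81
+# apache_sites:
+#   - lms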
diff --git a/playbooks/roles/apache/handlers/main.yml b/playbooks/roles/apache/handlers/main.yml index 407739b2160..943db41d150 100644 --- a/playbooks/roles/apache/handlers/main.yml +++ b/playbooks/roles/apache/handlers/main.yml @@ -1,3 +1,7 @@ --- - name: restart apache - service: name=apache2 state=restarted + service: + name: apache2 + state: restarted + tags: + - deploy diff --git a/playbooks/roles/mongo/meta/main.yml b/playbooks/roles/apache/meta/main.yml similarity index 100% rename from playbooks/roles/mongo/meta/main.yml rename to playbooks/roles/apache/meta/main.yml diff --git a/playbooks/roles/apache/tasks/apache_site.yml b/playbooks/roles/apache/tasks/apache_site.yml deleted file mode 100644 index 88ee1f2121b..00000000000 --- a/playbooks/roles/apache/tasks/apache_site.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Requires nginx package ---- -- name: Copying apache config {{ site_name }} - template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }} - first_available_file: - - "{{ local_dir }}/apache/templates/{{ site_name }}.j2" - # seems like paths in first_available_file must be relative to the playbooks dir - - "roles/apache/templates/{{ site_name }}.j2" - notify: restart apache - when: apache_role_run is defined - tags: - - apache - - update - -- name: Creating apache2 config link {{ site_name }} - file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root - notify: restart apache - when: apache_role_run is defined - tags: - - apache - - update diff --git a/playbooks/roles/apache/tasks/main.yml b/playbooks/roles/apache/tasks/main.yml index 129cb5ecf14..a2041a1e8d0 100644 --- a/playbooks/roles/apache/tasks/main.yml +++ b/playbooks/roles/apache/tasks/main.yml @@ -1,36 +1,49 @@ -#Installs apache and runs the lms wsgi +# Installs apache and runs the lms wsgi by default --- - - name: Installs apache and mod_wsgi from apt - apt: pkg={{item}} install_recommends=no state=present update_cache=yes + apt: + name: "{{ item }}" + install_recommends: no + state: present + update_cache: yes with_items: - apache2 - libapache2-mod-wsgi notify: restart apache - tags: - - apache - - install - -- name: disables default site - command: a2dissite 000-default + +- name: Disables default site + file: + path: /etc/apache2/sites-enabled/000-default + state: absent notify: restart apache - tags: - - apache - - install - -- name: rewrite apache ports conf - template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root + +- name: Rewrite apache ports conf + template: + src: ports.conf.j2 + dest: /etc/apache2/ports.conf + owner: root + group: root notify: restart apache - tags: - - apache - - install -- name: Register the fact that apache role has run - command: echo True - register: apache_role_run - tags: - - apache - - install +- debug: + msg: "{{ apache_sites }}" +- name: "Copying apache configs for {{ apache_sites }}" + template: + src: "{{ item }}.j2" + dest: "/etc/apache2/sites-available/{{ item }}" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + notify: restart apache + with_items: "{{ apache_sites }}" -- include: apache_site.yml state=link site_name=lms +- name: "Creating apache2 config links for {{ apache_sites }}" + file: + src: "/etc/apache2/sites-available/{{ item }}" + dest: "/etc/apache2/sites-enabled/{{ item }}" + state: link + owner: root + group: root + notify: restart apache + with_items: "{{ apache_sites }}" diff --git a/playbooks/roles/apache/templates/lms.j2 
b/playbooks/roles/apache/templates/lms.j2 index 12469415ce3..2f5c068a5b9 100644 --- a/playbooks/roles/apache/templates/lms.j2 +++ b/playbooks/roles/apache/templates/lms.j2 @@ -1,20 +1,20 @@ WSGIPythonHome {{ edxapp_venv_dir }} WSGIRestrictEmbedded On - + ServerName https://{{ lms_env_config.SITE_NAME }} ServerAlias *.{{ lms_env_config.SITE_NAME }} UseCanonicalName On - + Alias /static/ /opt/wwc/staticfiles/ - + Order deny,allow Allow from all SetEnv SERVICE_VARIANT lms - + WSGIScriptAlias / {{ edxapp_code_dir }}/lms/wsgi_apache_lms.py @@ -42,10 +42,10 @@ WSGIRestrictEmbedded On WSGIDaemonProcess lms user=www-data group=adm processes=1 python-path={{ edxapp_code_dir }}:{{ edxapp_venv_dir }}/lib/python2.7/site-packages display-name=%{GROUP} WSGIProcessGroup lms WSGIApplicationGroup %{GLOBAL} - + ErrorLog ${APACHE_LOG_DIR}/apache-edx-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %D" apache-edx - CustomLog ${APACHE_LOG_DIR}/apache-edx-access.log apache-edx + CustomLog {{ APACHE_LOG_DIR }}/apache-edx-access.log apache-edx diff --git a/playbooks/roles/apache/templates/ports.conf.j2 b/playbooks/roles/apache/templates/ports.conf.j2 index 5083880c1b9..83e884f3f50 100644 --- a/playbooks/roles/apache/templates/ports.conf.j2 +++ b/playbooks/roles/apache/templates/ports.conf.j2 @@ -1,2 +1,4 @@ -NameVirtualHost *:{{apache_port}} -Listen {{apache_port}} +{%- for port in apache_ports -%} +NameVirtualHost *:{{ port }} +Listen {{ port }} +{% endfor %} diff --git a/playbooks/roles/authn/defaults/main.yml b/playbooks/roles/authn/defaults/main.yml new file mode 100644 index 00000000000..c7a95c4039e --- /dev/null +++ b/playbooks/roles/authn/defaults/main.yml @@ -0,0 +1,2 @@ +authn_env_extra: + SHOW_CONFIGURABLE_EDX_FIELDS: "{{ AUTHN_SHOW_CONFIGURABLE_EDX_FIELDS }}" diff --git a/playbooks/roles/authn/tasks/main.yml b/playbooks/roles/authn/tasks/main.yml new file mode 100644 index 00000000000..75f3b9d7f3b --- /dev/null +++ b/playbooks/roles/authn/tasks/main.yml @@ -0,0 +1,10 @@ +- name: Build Authn MFE + include_role: + name: mfe + vars: + MFE_NAME: authn + MFE_VERSION: '{{ AUTHN_MFE_VERSION }}' + MFE_SITE_NAME: 'Your Platform Name Here' + MFE_NPM_OVERRIDES: '{{ AUTHN_MFE_NPM_OVERRIDES | default(MFE_DEPLOY_NPM_OVERRIDES) }}' + MFE_ENVIRONMENT_EXTRA: '{{ authn_env_extra | default(MFE_DEPLOY_ENVIRONMENT_EXTRA) }}' + MFE_GIT_PATH: 'openedx' diff --git a/playbooks/roles/automated/defaults/main.yml b/playbooks/roles/automated/defaults/main.yml index e600f085af2..ee7a08b59cc 100644 --- a/playbooks/roles/automated/defaults/main.yml +++ b/playbooks/roles/automated/defaults/main.yml @@ -2,26 +2,20 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # -## -# Vars for role automated -# - # -# vars are namespace with the module name. 
+# Vars for role automated # + automated_role_name: automated -automated_user: "automator" -automated_home: "/home/automator" -automated_rbash_links: !!null -automated_sudoers_template: !!null -automated_sudoers_file: !!null - - # +AUTOMATED_USER: "changeme" +automated_sudoers_template: "99-automated.j2" + +# # OS packages # diff --git a/playbooks/roles/automated/files/home/automator/.profile b/playbooks/roles/automated/files/home/automator/.profile deleted file mode 100644 index 476e8685e09..00000000000 --- a/playbooks/roles/automated/files/home/automator/.profile +++ /dev/null @@ -1 +0,0 @@ -. .bashrc diff --git a/playbooks/roles/automated/meta/main.yml b/playbooks/roles/automated/meta/main.yml new file mode 100644 index 00000000000..9cbd3d8ae55 --- /dev/null +++ b/playbooks/roles/automated/meta/main.yml @@ -0,0 +1,12 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# + +# Allow this role to be duplicated in dependencies. +allow_duplicates: yes diff --git a/playbooks/roles/automated/tasks/main.yml b/playbooks/roles/automated/tasks/main.yml index 174ef778306..e92155b9eb9 100644 --- a/playbooks/roles/automated/tasks/main.yml +++ b/playbooks/roles/automated/tasks/main.yml @@ -2,142 +2,110 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # Tasks for role automated -# +# # Overview: # # This role is included as a dependency by other roles which provide -# automated jobs. Automation occurs over ssh. The automator user -# is assigned to a managed rbash shell and is, potentially, allowed to run -# explicitly listed commands via sudo. Both the commands that are -# allowed via rbash and the sudoers file are provided by the -# including role. +# automated jobs. Automation occurs over ssh. The automator user +# is allowed to run explicitly listed commands via sudo. # # Dependencies: # # This role depends upon variables provided by an including role # via the my_role/meta/main.yml file. Includes take the following forms: # -# dependencies: -# - { -# role: automated, -# automated_rbash_links: $as_automated_rbash_links, -# automated_sudoers_dest: '99-my_role' -# automated_sudoers_file: 'roles/my_role/files/etc/sudoers.d/99-my_role' -# } -# -# or +# For example, the edxapp role might designate that ecom and analytics users +# are allowed to run specific management commands on edxapp instances.
# -# dependencies: -# - { -# role: automated, -# automated_rbash_links: $as_automated_rbash_links, -# automated_sudoers_dest: '99-my_role' -# automated_sudoers_template: 'roles/my_role/templates/etc/sudoers.d/99-my_role.j2' -# } +# EDXAPP_AUTOMATED_USERS: +# ecom: +# sudo_commands: +# - command: "/edx/app/edxapp/edx-platform/manage.py lms showmigrations --settings=production" +# python_prefix: /edx/app/edxapp/venvs/edxapp/bin/python +# sudo_user: "edxapp" +# - command: "/edx/app/edxapp/edx-platform/manage.py cms showmigrations --settings=production" +# python_prefix: /edx/app/edxapp/venvs/edxapp/bin/python +# sudo_user: "edxapp" +# authorized_keys: +# - 'ssh-rsa ecom+admin@example.com' +# - 'ssh-rsa ecom+devel@example.com' +# analytics: +# sudo_commands: +# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms help --settings=production" +# sudo_user: "edxapp" +# authorized_keys: ['ssh-rsa analytics@example.com'] # -# The sudoers file is optional. Note that for sudo to work it must be -# included in the rbash links list. +# The play for the role enabling automation should include the role like so # -# That list should be provided via my_role's defaults -# -# role_automated_rbash_links: -# - /usr/bin/sudo -# - /usr/bin/scp +# - role: automated +# AUTOMATED_USERS: "{{ EDXAPP_AUTOMATED_USERS | default({}) }}" # -- fail: automated_rbash_links required for role - when: automated_rbash_links is not defined +- name: Create automated user + user: + name: "{{ item.key }}" + state: present + shell: "/bin/bash" + createhome: yes + with_dict: "{{ AUTOMATED_USERS }}" -- fail: automated_sudoers_dest required for role - when: automated_sudoers_dest is not defined - -- name: create automated user - user: - name={{ automated_user }} state=present shell=/bin/rbash - home={{ automated_home }} createhome=yes +- name: Create sudoers file from template + template: + dest: "/etc/sudoers.d/99-{{ item.key }}" + src: "{{ automated_sudoers_template }}" + owner: "root" + group: "root" + mode: "0440" + validate: 'visudo -cf %s' + with_dict: "{{ AUTOMATED_USERS }}" -- name: create sudoers file from file - copy: - dest=/etc/sudoers.d/{{ automated_sudoers_dest }} - src={{ automated_sudoers_file }} owner="root" - group="root" mode=0440 validate='visudo -cf %s' - when: automated_sudoers_file +- name: Create .ssh directory + file: + path: "/home/{{ item.key }}/.ssh" + state: "directory" + mode: "0700" + owner: "{{ item.key }}" + group: "{{ item.key }}" + with_dict: "{{ AUTOMATED_USERS }}" -- name: create sudoers file from template +- name: Build authorized_keys file template: - dest=/etc/sudoers.d/{{ automated_sudoers_dest }} - src={{ automated_sudoers_template }} owner="root" - group="root" mode=0440 validate='visudo -cf %s' - when: automated_sudoers_template + src: "home/automator/.ssh/authorized_keys.j2" + dest: "/home/{{ item.key }}/.ssh/authorized_keys" + mode: "0600" + owner: "{{ item.key }}" + group: "{{ item.key }}" + with_dict: "{{ AUTOMATED_USERS }}" - # - # Prevent user from updating their PATH and - # environment. 
- # -- name: update shell file mode +- name: Build known_hosts file file: - path={{ automated_home }}/{{ item }} mode=0640 - state=file owner="root" group={{ automated_user }} - with_items: - - .bashrc - - .profile - - .bash_logout - -- name: change ~automated ownership - file: - path={{ automated_home }} mode=0750 state=directory - owner="root" group={{ automated_user }} + path: "/home/{{ item.key }}/.ssh/known_hosts" + state: "touch" + mode: "0755" + owner: "{{ item.key }}" + group: "{{ item.key }}" + with_dict: "{{ AUTOMATED_USERS }}" - # - # This ensures that the links are updated with each run - # and that links that were remove from the role are - # removed. - # -- name: remove ~automated/bin directory - file: - path={{ automated_home }}/bin state=absent - ignore_errors: yes - -- name: create ~automated/bin directory - file: - path={{ automated_home }}/bin state=directory mode=0750 - owner="root" group={{ automated_user }} - -- name: re-write .profile - copy: - src=home/automator/.profile - dest={{ automated_home }}/.profile - owner="root" - group={{ automated_user }} - mode="0744" - -- name: re-write .bashrc - copy: - src=home/automator/.bashrc - dest={{ automated_home }}/.bashrc - owner="root" - group={{ automated_user }} - mode="0744" - -- name: create .ssh directory - file: - path={{ automated_home }}/.ssh state=directory mode=0700 - owner={{ automated_user }} group={{ automated_user }} +- name: remove ssh AllowUsers directive + lineinfile: + dest: /etc/ssh/sshd_config + regexp: "^AllowUsers" + state: absent + register: users_ssh_access -- name: build authorized_keys file - template: - src=home/automator/.ssh/authorized_keys.j2 - dest={{ automated_home }}/.ssh/authorized_keys mode=0600 - owner={{ automated_user }} group={{ automated_user }} - -- name: create allowed command links - file: - src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }} - state=link - with_items: automated_rbash_links \ No newline at end of file +- name: restart ssh + service: + name: ssh + state: restarted + when: users_ssh_access.changed + +- name: Add management command users to docker group + command: 'sudo usermod -aG docker "{{ item }}"' + when: ENABLE_DOCKER_ACCESS_FOR_AUTOMATED_USERS is defined and ENABLE_DOCKER_ACCESS_FOR_AUTOMATED_USERS + with_items: '{{ AUTOMATED_USERS }}' diff --git a/playbooks/roles/automated/templates/99-automated.j2 b/playbooks/roles/automated/templates/99-automated.j2 new file mode 100644 index 00000000000..e02e446935e --- /dev/null +++ b/playbooks/roles/automated/templates/99-automated.j2 @@ -0,0 +1,6 @@ +{% for command in item.value.sudo_commands %} +{% if "python_prefix" in command and command.python_prefix != "" -%} +{{ item.key }} ALL=({{ command.sudo_user }}) SETENV:NOPASSWD:{{ command.python_prefix }} {{ command.command | replace('\'', '') }} +{% endif %} +{{ item.key }} ALL=({{ command.sudo_user }}) SETENV:NOPASSWD:{{ command.command | replace('\'', '') }} +{% endfor %} diff --git a/playbooks/roles/automated/templates/home/automator/.ssh/authorized_keys.j2 b/playbooks/roles/automated/templates/home/automator/.ssh/authorized_keys.j2 index e20af0ea192..3446015960f 100644 --- a/playbooks/roles/automated/templates/home/automator/.ssh/authorized_keys.j2 +++ b/playbooks/roles/automated/templates/home/automator/.ssh/authorized_keys.j2 @@ -1,5 +1,5 @@ # {{ ansible_managed }} -{% for line in automated_authorized_keys -%} +{% for line in item.value.authorized_keys -%} {{ line }} -{%- endfor %} \ No newline at end of file +{% endfor %} \ No newline at end of 
file diff --git a/playbooks/roles/aws/defaults/main.yml b/playbooks/roles/aws/defaults/main.yml index 17222f94720..f433e3c5a68 100644 --- a/playbooks/roles/aws/defaults/main.yml +++ b/playbooks/roles/aws/defaults/main.yml @@ -2,68 +2,56 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Defaults for role aws # -# -# Rotate logs to S3 -# Only for when edX is running in AWS since it organizes -# logs by security group. -# !! The buckets defined below MUST exist prior to enabling !! -# this feature and the instance IAM role must have write permissions -# to the buckets -AWS_S3_LOGS: false + +# Both of these vars are required to work-around +# some ansible variable precedence issues with +# circular dependencies introduced in the openstack PR. +# More investigation is required to determine the optimal +# solution. +vhost_name: aws +VHOST_NAME: "{{ vhost_name }}" + # If there are any issues with the s3 sync an error # log will be sent to the following address. # This relies on your server being able to send mail AWS_S3_LOGS_NOTIFY_EMAIL: dummy@example.com AWS_S3_LOGS_FROM_EMAIL: dummy@example.com -# Separate buckets for tracking logs and everything else -# You should be overriding the environment and deployment vars -# Order of precedence is left to right for exclude and include options -AWS_S3_LOG_PATHS: - - bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs" - path: "{{ COMMON_LOG_DIR }}/!(*tracking*)" - - bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs" - path: "/var/log/*" - - bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-tracking-logs" - path: "{{ COMMON_LOG_DIR }}/*tracking*" - -# set this to true dump all extra vars -# this is currently broken when extra vars -# contains references to vars that are not -# included in the play. -AWS_DUMP_VARS: false - -# -# vars are namespace with the module name. 
-# -aws_role_name: aws -aws_data_dir: "{{ COMMON_DATA_DIR }}/aws" -aws_app_dir: "{{ COMMON_APP_DIR }}/aws" -aws_var_file: "{{ aws_app_dir }}/server-vars.yml" -aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3" -aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log" -aws_log_dir: "{{ COMMON_LOG_DIR }}/aws" +# Credentials for S3 access in case the instance role doesn't have write +# permissions to S3 +AWS_S3_LOGS_ACCESS_KEY_ID: "" +AWS_S3_LOGS_SECRET_KEY: "" +aws_s3_sync_script: "{{ vhost_dirs.home.path }}/send-logs-to-s3" +aws_s3_logfile: "{{ vhost_dirs.logs.path }}/s3-log-sync.log" +aws_region: "us-east-1" # default path to the aws binary -aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd" +aws_s3cmd: "/usr/bin/s3cmd" +aws_cmd: "/usr/local/bin/aws" +aws_requirements: "{{ vhost_dirs.home.path }}/requirements.txt" + # # OS packages # aws_debian_pkgs: - python-setuptools - -aws_pip_pkgs: - - https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz - - awscli + - s3cmd aws_redhat_pkgs: [] -aws_s3cmd_version: s3cmd-1.5.0-beta1 -aws_s3cmd_url: "http://files.edx.org/s3cmd/%7B%7B%20aws_s3cmd_version%20%7D%7D.tar.gz" + +# The AWS_GATHER_FACTS switch is used to enable/disable data gathering +# from ec2 instances. +# This is needed in some deployments where S3 is being used for file storage but +# the appserver is in another cloud provider, such as OpenStack. +# These issues started happening after the ec2_facts role was replaced with +# the new version `ec2_metadata_facts` that fails when the server is not +# on AWS, unlike its older counterpart +AWS_GATHER_FACTS: true diff --git a/playbooks/roles/aws/meta/main.yml b/playbooks/roles/aws/meta/main.yml index 421f606deb2..a83bb5c5fc3 100644 --- a/playbooks/roles/aws/meta/main.yml +++ b/playbooks/roles/aws/meta/main.yml @@ -2,21 +2,14 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Role includes for role aws # -# Example: -# -# dependencies: -# - { -# role: my_role -# my_role_var0: "foo" -# my_role_var1: "bar" -# } dependencies: - - common + - role: vhost + VHOST_NAME: "{{ vhost_name }}" diff --git a/playbooks/roles/aws/tasks/main.yml b/playbooks/roles/aws/tasks/main.yml index c24ee38c205..988a4ae65e0 100644 --- a/playbooks/roles/aws/tasks/main.yml +++ b/playbooks/roles/aws/tasks/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -21,128 +21,99 @@ # # -- name: create data and app directories - file: > - path={{ item }} - state=directory - owner=root - group=root - mode=0700 - with_items:
- "{{ aws_data_dir }}" - - "{{ aws_app_dir }}" - - "{{ aws_log_dir }}" - -- name: install system packages - apt: > - pkg={{','.join(aws_debian_pkgs)}} - state=present - update_cache=yes - -- name: install aws python packages - pip: > - name="{{ item }}" state=present - extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" - with_items: aws_pip_pkgs - -- name: get s3cmd - get_url: > - url={{ aws_s3cmd_url }} - dest={{ aws_data_dir }}/ - -- name: untar s3cmd - shell: > - tar xf {{ aws_data_dir }}/{{ aws_s3cmd_version }}.tar.gz - creates={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd - chdir={{ aws_app_dir }} - -- name: create symlink for s3cmd - file: > - src={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd - dest={{ COMMON_BIN_DIR }}/s3cmd - state=link - -# The sync script and config file are now symlinks -# Remove them if they are regular files -# This can be removed once we don't need to worry -# about backwards compatibility. -- stat: path={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }} - register: sync_script - -- stat: path={{ COMMON_CFG_DIR}}/{{ aws_var_file|basename }} - register: var_file - -- file: path={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }} state=absent - when: sync_script.stat.exists and sync_script.stat.isreg - -- file: path={{ COMMON_CFG_DIR}}/{{ aws_var_file|basename }} state=absent - when: var_file.stat.exists and var_file.stat.isreg - -- name: dump all vars to yaml - template: src=dumpall.yml.j2 dest={{ aws_var_file }} mode=0600 - when: AWS_DUMP_VARS - -- name: create symlink for config file - file: > - src={{ aws_var_file }} - dest={{ COMMON_CFG_DIR }}/{{ aws_var_file|basename }} - state=link - when: AWS_DUMP_VARS +# +# Start dealing with Jumbo frames issue in mixed MTU deployments in AWS +# +- name: Gather ec2 facts for use in other roles + action: ec2_metadata_facts + no_log: True + when: AWS_GATHER_FACTS + tags: + - deploy + +- name: Set the MTU to 1500 temporarily + shell: "/sbin/ifconfig {{ ansible_default_ipv4.interface }} mtu 1500 up" + when: ansible_distribution in common_debian_variants + +- name: Set the MTU to 1500 permanently + template: + dest: /etc/network/if-up.d/mtu + src: mtu.j2 + mode: 0755 + owner: root + group: root + when: ansible_distribution in common_debian_variants +# +# End dealing with Jumbo frames issue in mixed MTU deployments in AWS +# -- name: clean up var file, removing all version vars and internal ansible vars - shell: sed -i -e "/{{item}}/d" {{ aws_var_file }} +- name: Install system packages + apt: + name: "{{ aws_debian_pkgs }}" + state: present + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: ansible_distribution in common_debian_variants + +- name: Copy the Python requirements file + template: + src: "requirements.txt.j2" + dest: "{{ aws_requirements }}" + owner: root + group: root + mode: 0644 + +- name: Install aws python packages + pip: + requirements: "{{ aws_requirements }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + +- name: Copy the boto global config file + template: + src: "boto.cfg.j2" + dest: "/etc/boto.cfg" + owner: root + group: root + mode: 0644 + +- name: Create s3 log sync script + template: + dest: "{{ aws_s3_sync_script }}" + src: send-logs-to-s3.j2 + mode: 0755 + owner: root + group: root + when: COMMON_OBJECT_STORE_LOG_SYNC + +- name: Create symlink for s3 log sync script + file: + state: link + src: "{{ aws_s3_sync_script }}" + dest: "{{ COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT }}" + when: COMMON_OBJECT_STORE_LOG_SYNC
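# Note on the two tasks above: both are gated on COMMON_OBJECT_STORE_LOG_SYNC.
# A minimal sketch of enabling the sync from a vars file (the script path is
# an assumption for illustration; neither value is defined in this diff):
#
# COMMON_OBJECT_STORE_LOG_SYNC: true
# COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT: "{{ COMMON_BIN_DIR }}/send-logs-to-s3"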
+ + # update the ssh motd on Ubuntu + # Remove some of the default motd display on ubuntu + # and add a custom motd. These do not require an + # ssh restart + # Only needed for EC2 instances. +- name: Check if files exist so the next task doesn't fail + stat: + path: "{{ item }}" + register: motd_files_exist with_items: - # deploy versions - - "^edx_platform_version:" - - "^edx_platform_commit:" - - "^xqueue_version:" - - "^forum_version:" - - "^xserver_version:" - - "^discern_ease_version:" - - "^ora_ease_version:" - - "^discern_version:" - - "^ora_version:" - - "^configuration_version:" - - "^ease_version:" - - "^certs_version:" - # other misc vars - - "^tags:" - - "^_original_file:" - - "^register:" - - "^item:" - - "^failed_when:" - - "^changed_when:" - - "^delegate_to:" - - "^ansible_ssh_private_key_file:" - - "^always_run:" - when: AWS_DUMP_VARS - - -- name: create s3 log sync script - template: > - dest={{ aws_s3_sync_script }} - src=send-logs-to-s3.j2 mode=0755 owner=root group=root - when: AWS_S3_LOGS - -- name: create symlink for s3 log sync script - file: > - state=link - src={{ aws_s3_sync_script }} - dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }} - when: AWS_S3_LOGS - -- name: run s3 log sync script on shutdown - file: > - state=link - src={{ COMMON_BIN_DIR }}/send-logs-to-s3 - path=/etc/rc0.d/S00send-logs-to-s3 - when: AWS_S3_LOGS - - # cron job runs the aws s3 sync script -- name: cronjob for s3 log sync - cron: > - name="cronjob for s3 log sync" - user=root - minute=0 - job={{ aws_s3_sync_script }} - when: AWS_S3_LOGS + - "/etc/update-motd.d/10-help-text" + - "/usr/share/landscape/landscape-sysinfo.wrapper" + - "/etc/update-motd.d/51-cloudguest" + - "/etc/update-motd.d/91-release-upgrade" + +- name: Update the ssh motd on Ubuntu + file: + path: "{{ item.item }}" + mode: "0644" + when: vagrant_home_dir.stat.exists == False and ansible_distribution in common_debian_variants and item.stat.exists + with_items: "{{ motd_files_exist.results }}" diff --git a/playbooks/roles/aws/templates/boto.cfg.j2 b/playbooks/roles/aws/templates/boto.cfg.j2 new file mode 100644 index 00000000000..eeb28bdfcd8 --- /dev/null +++ b/playbooks/roles/aws/templates/boto.cfg.j2 @@ -0,0 +1,2 @@ +[Boto] +http_socket_timeout = 3 diff --git a/playbooks/roles/aws/templates/mtu.j2 b/playbooks/roles/aws/templates/mtu.j2 new file mode 100644 index 00000000000..e0c04b016a4 --- /dev/null +++ b/playbooks/roles/aws/templates/mtu.j2 @@ -0,0 +1,2 @@ +#!/bin/sh +ifconfig {{ ansible_default_ipv4.interface }} mtu 1500 diff --git a/playbooks/roles/aws/templates/requirements.txt.j2 b/playbooks/roles/aws/templates/requirements.txt.j2 new file mode 100644 index 00000000000..2b3b3b0edda --- /dev/null +++ b/playbooks/roles/aws/templates/requirements.txt.j2 @@ -0,0 +1,49 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +awscli==1.32.2 + # via -r requirements/aws.in +boto==2.49.0 + # via -r requirements/aws.in +boto3==1.34.2 + # via -r requirements/aws.in +botocore==1.34.2 + # via + # awscli + # boto3 + # s3transfer +colorama==0.4.4 + # via awscli +docutils==0.16 + # via awscli +jmespath==1.0.1 + # via + # boto3 + # botocore +pyasn1==0.5.1 + # via rsa +python-dateutil==2.8.2 + # via + # botocore + # s3cmd +python-magic==0.4.27 + # via s3cmd +pyyaml==5.3.1 + # via + # -r requirements/aws.in + # awscli +rsa==4.7.2 + # via awscli +s3cmd==2.4.0 + # via -r requirements/aws.in +s3transfer==0.9.0 + # via + # awscli + # boto3 +six==1.16.0 + # via 
python-dateutil +urllib3==1.26.18 + # via botocore diff --git a/playbooks/roles/aws/templates/send-logs-to-s3.j2 b/playbooks/roles/aws/templates/send-logs-to-s3.j2 index 10fc12e940c..7fa6a492086 100644 --- a/playbooks/roles/aws/templates/send-logs-to-s3.j2 +++ b/playbooks/roles/aws/templates/send-logs-to-s3.j2 @@ -4,38 +4,50 @@ # # This script can be called from logrotate # to sync logs to s3 +# if (( $EUID != 0 )); then echo "Please run as the root user" exit 1 fi -S3_LOGFILE="{{ aws_s3_logfile }}" -NOTIFY_EMAIL={{ AWS_S3_LOGS_NOTIFY_EMAIL }} -FROM_EMAIL={{ AWS_S3_LOGS_FROM_EMAIL }} -AWS_CMD={{ aws_cmd }} +# +# Ensure the log processors can read without +# running as root +if [ ! -f "{{ aws_s3_logfile }}" ]; then + sudo -u syslog touch "{{ aws_s3_logfile }}" +else + chown syslog.syslog "{{ aws_s3_logfile }}" +fi -exec > >(tee $S3_LOGFILE) +exec > >(tee -a "{{ aws_s3_logfile }}") exec 2>&1 +# s3cmd sync requires a valid home +# directory +export HOME=/ + shopt -s extglob usage() { cat< $message_file + message_string="Error syncing $s3_path: inst_id=$instance_id ip=$ip region={{ aws_region }}" + if [[ -r "{{ aws_s3_logfile }}" ]]; then + python3 -c "import json; d={'Subject':{'Data':'$message_string'},'Body':{'Text':{'Data':open('"{{ aws_s3_logfile }}"').read()}}};print(json.dumps(d))" > $message_file else cat << EOF > $message_file {"Subject": { "Data": "$message_string" }, "Body": { "Text": { "Data": "!! ERROR !! no logfile" } } } EOF fi - echo "ERROR: syncing $s3_path on $instance_id" - $AWS_CMD ses send-email --from $FROM_EMAIL --to $NOTIFY_EMAIL --message file://$message_file --region $region + echo "Error syncing $s3_path on $instance_id" + {{ aws_cmd }} ses send-email --from {{ AWS_S3_LOGS_FROM_EMAIL }} --to {{ AWS_S3_LOGS_NOTIFY_EMAIL }} --message file://$message_file --region {{ aws_region }} else echo "Error syncing $s3_path on $instance_id" fi } -trap onerror ERR SIGHUP SIGINT SIGTERM +trap onerror ERR SIGHUP SIGINT SIGTERM # first security group is used as the directory name in the bucket sec_grp=$(ec2metadata --security-groups | head -1) @@ -91,9 +118,13 @@ instance_id=$(ec2metadata --instance-id) ip=$(ec2metadata --local-ipv4) availability_zone=$(ec2metadata --availability-zone) # region isn't available via the metadata service -region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1} +region=${availability_zone:0:${{ lb }}#availability_zone{{ rb }} - 1} + +{% if AWS_S3_LOGS_ACCESS_KEY_ID %} +auth_opts="--access_key {{ AWS_S3_LOGS_ACCESS_KEY_ID }} --secret_key {{ AWS_S3_LOGS_SECRET_KEY }}" +{% else %} +auth_opts="" +{% endif %} s3_path="${2}/$sec_grp/" -{% for item in AWS_S3_LOG_PATHS -%} -$noop $AWS_CMD sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/" -{% endfor %} +$noop {{ aws_s3cmd }} $auth_opts --multipart-chunk-size-mb 5120 --disable-multipart sync $directory/* "s3://${bucket}/${prefix}${sec_grp}/${instance_id}-${ip}/" diff --git a/playbooks/roles/aws_cloudwatch_agent/defaults/main.yml b/playbooks/roles/aws_cloudwatch_agent/defaults/main.yml new file mode 100644 index 00000000000..64e5d37c2b6 --- /dev/null +++ b/playbooks/roles/aws_cloudwatch_agent/defaults/main.yml @@ -0,0 +1,26 @@ +--- + +# +# Defaults specified here should not contain +# any secrets or host identifying information. 
+# + +# The Amazon CloudWatch Agent URL +cloudwatch_url: https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb + +# Default cloudwatch namespace +cloudwatch_namespace: Analytics/Monitor + +# Populate the cloudwatch_procstat_patterns with patterns that you want to pass to the procstat config. +# If the list is empty, then the procstat is not enabled. +cloudwatch_procstat_patterns: [] + +# Collectd installation parameters +collectd_version: "5.9.2.g-1ubuntu5" +collectd_install_recommends: yes + +# CloudWatch logs configuration +cloudwatch_logs_enabled: false +# List of objects with `file_path`, `log_group_name` and `log_stream_name` keys. +# See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html#CloudWatch-Agent-Configuration-File-Logssection +cloudwatch_logs_collect_list: [] diff --git a/playbooks/roles/aws_cloudwatch_agent/tasks/main.yml b/playbooks/roles/aws_cloudwatch_agent/tasks/main.yml new file mode 100644 index 00000000000..243e8a7b7d9 --- /dev/null +++ b/playbooks/roles/aws_cloudwatch_agent/tasks/main.yml @@ -0,0 +1,67 @@ +--- + +- name: Install Collectd to use with AWS CloudWatch Agent + apt: + name: collectd={{ collectd_version }} + install_recommends: "{{ collectd_install_recommends }}" + state: present + tags: + - install + - install:base + - install:cloudwatch + +- name: Install xz-utils (required when using deb parameter of apt module) + apt: + name: xz-utils + state: present + tags: + - install + - install:base + - install:cloudwatch + +- name: Download the AWS CloudWatch Agent Debian package + get_url: + url: "{{ cloudwatch_url }}" + dest: /tmp/amazon-cloudwatch-agent.deb + tags: + - install + - install:base + - install:cloudwatch + +- name: Install AWS CloudWatch Agent Debian package + apt: + deb: /tmp/amazon-cloudwatch-agent.deb + state: present + tags: + - install + - install:base + - install:cloudwatch + +- name: Copy amazon-cloudwatch-agent template + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "{{ item.mode }}" + with_items: + - { src: 'amazon-cloudwatch-agent.json.j2', dest: '/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json', mode: '0644' } + tags: + - install + - install:base + - install:cloudwatch + +- name: Enable AWS CloudWatch Agent + service: + name: amazon-cloudwatch-agent + enabled: yes + tags: + - install + - install:base + - install:cloudwatch + +- name: Run AWS CloudWatch Agent + shell: "/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json" + become: yes + tags: + - install + - install:code + - install:cloudwatch diff --git a/playbooks/roles/aws_cloudwatch_agent/templates/amazon-cloudwatch-agent.json.j2 b/playbooks/roles/aws_cloudwatch_agent/templates/amazon-cloudwatch-agent.json.j2 new file mode 100644 index 00000000000..4662d8ea2c7 --- /dev/null +++ b/playbooks/roles/aws_cloudwatch_agent/templates/amazon-cloudwatch-agent.json.j2 @@ -0,0 +1,72 @@ +{ + "agent": { + "metrics_collection_interval": 60, + "run_as_user": "root" + }, + "metrics": { + "namespace": "{{ cloudwatch_namespace }}", + "append_dimensions": { + "InstanceId": "${aws:InstanceId}" + }, + "metrics_collected": { + {% if cloudwatch_procstat_patterns -%} + "procstat": [ + {%- for procstat_pattern in cloudwatch_procstat_patterns -%} + { + "pattern": "{{ procstat_pattern }}", + "measurement": [ + "cpu_time", + "cpu_time_system", + "cpu_time_user" 
+ ] + {%- if not loop.last -%} + }, + {%- else -%} + } + {% endif -%} + {% endfor -%} + ], + {% endif -%} + "collectd": { + "metrics_aggregation_interval": 60 + }, + "disk": { + "measurement": [ + "used_percent" + ], + "metrics_collection_interval": 60, + "resources": [ + "*" + ] + }, + "mem": { + "measurement": [ + "mem_used_percent" + ], + "metrics_collection_interval": 60 + }, + "statsd": { + "metrics_aggregation_interval": 60, + "metrics_collection_interval": 60, + "service_address": ":8125" + } + } + }{% if cloudwatch_logs_enabled %}, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + {%- for log_config in cloudwatch_logs_collect_list -%} + { + "file_path": "{{ log_config.file_path }}", + "log_group_name": "{{ log_config.log_group_name }}", + "log_stream_name": "{{ log_config.log_stream_name }}" + }{{ ", " if not loop.last else "" }} + {%- endfor -%} + ] + } + }, + "log_stream_name": "default_server_log_stream" + } + {% endif %} +} diff --git a/playbooks/roles/aws_devstack/meta/main.yml b/playbooks/roles/aws_devstack/meta/main.yml new file mode 100644 index 00000000000..fdda41bb3a1 --- /dev/null +++ b/playbooks/roles/aws_devstack/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/playbooks/roles/aws_devstack/tasks/main.yml b/playbooks/roles/aws_devstack/tasks/main.yml new file mode 100644 index 00000000000..91263ea4c45 --- /dev/null +++ b/playbooks/roles/aws_devstack/tasks/main.yml @@ -0,0 +1,48 @@ +#- name: Remove dependencies that are no longer required +# apt: +# autoremove: yes +# +- name: Install read-only ssh key for the devstack repos + copy: + content: "{{ COMMON_GIT_IDENTITY }}" + dest: /home/ubuntu/.ssh/id_rsa + force: yes + owner: "ubuntu" + mode: "0600" + +- name: Remove old github hostkey + ansible.builtin.shell: + cmd: ssh-keygen -R github.com + become_user: ubuntu + +#- name: SSH Keyscan for github +# ansible.builtin.shell: +# cmd: ssh-keyscan -t rsa github.com >> /home/ubuntu/.ssh/known_hosts + +- name: add all github hostkeys + blockinfile: + state: present + insertafter: EOF + dest: /home/ubuntu/.ssh/known_hosts + content: | + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + +- name: Run script to sync all repos + shell: | + cd /home/ubuntu + source /home/ubuntu/.profile + ./pull-all-repos-in-directory.sh + cd edx-repos/devstack/ + make dev.pull.large-and-slow + make dev.up.lms + . 
/home/ubuntu/.profile && docker compose exec lms env TERM=xterm-256color bash -c 'make requirements && npm ci && paver update_assets && python manage.py lms migrate' + async: 1800 + poll: 10 + become_user: ubuntu + +- name: Delete read-only ssh key + ansible.builtin.file: + state: absent + path: /home/ubuntu/.ssh/id_rsa diff --git a/playbooks/roles/blockstore/defaults/main.yml b/playbooks/roles/blockstore/defaults/main.yml new file mode 100644 index 00000000000..0f1713f7863 --- /dev/null +++ b/playbooks/roles/blockstore/defaults/main.yml @@ -0,0 +1,86 @@ +--- +# Role to deploy Blockstore, the next-generation Open edX Learning Object Repository +# +# github: https://github.com/openedx/blockstore +# + +blockstore_service_name: 'blockstore' +blockstore_user: '{{ blockstore_service_name }}' +blockstore_home: '{{ COMMON_APP_DIR }}/{{ blockstore_service_name }}' +blockstore_code_dir: '{{ blockstore_home }}/{{ blockstore_service_name }}' +blockstore_venv_dir: '{{ blockstore_home }}/venvs/{{ blockstore_service_name }}' + +BLOCKSTORE_GIT_PATH: 'edx' +BLOCKSTORE_VERSION: 'master' +BLOCKSTORE_GIT_IDENTITY: !!null + +BLOCKSTORE_USE_PYTHON38: True + +BLOCKSTORE_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ BLOCKSTORE_GIT_PATH }}' + REPO: 'blockstore.git' + VERSION: '{{ BLOCKSTORE_VERSION }}' + DESTINATION: '{{ blockstore_code_dir }}' + SSH_KEY: '{{ BLOCKSTORE_GIT_IDENTITY }}' + +blockstore_gunicorn_host: '127.0.0.1' +blockstore_gunicorn_port: '8250' + +BLOCKSTORE_GUNICORN_WORKERS: 2 +BLOCKSTORE_GUNICORN_EXTRA: '' +BLOCKSTORE_GUNICORN_EXTRA_CONF: '' +BLOCKSTORE_GUNICORN_WORKER_CLASS: 'gevent' +BLOCKSTORE_GUNICORN_MAX_REQUESTS: null + +# This controls both gunicorn and nginx proxy timeouts. +BLOCKSTORE_REQUEST_TIMEOUT: 300 + +BLOCKSTORE_NGINX_HOSTNAME: '~^((stage|prod)-)?{{ blockstore_service_name }}.*' +BLOCKSTORE_NGINX_PORT: '1{{ blockstore_gunicorn_port }}' +BLOCKSTORE_SSL_NGINX_PORT: '4{{ blockstore_gunicorn_port }}' + +BLOCKSTORE_DEFAULT_DB_NAME: 'blockstore' +BLOCKSTORE_DATABASE_USER: 'blkstr01' +BLOCKSTORE_DATABASE_PASSWORD: 'password' +BLOCKSTORE_DATABASE_HOST: 'localhost' +BLOCKSTORE_DATABASE_PORT: 3306 +BLOCKSTORE_DATABASE_CONN_MAX_AGE: 60 + +BLOCKSTORE_DJANGO_SETTINGS_MODULE: 'blockstore.settings.production' +BLOCKSTORE_SECRET_KEY: !!null + + +# See edx_django_service_automated_users for an example of what this should be +BLOCKSTORE_AUTOMATED_USERS: {} + +# Rather than adding extra wiring for each var under here. 
+# Just override this whole config dictionary +BLOCKSTORE_SERVICE_CONFIG_OVERRIDES: + BLOCKSTORE_URL_ROOT: 'http://localhost:{{ blockstore_gunicorn_port }}' + +blockstore_environment: + BLOCKSTORE_CFG: '{{ COMMON_CFG_DIR }}/{{ blockstore_service_name }}.yml' + VIRTUAL_ENV: '{{ blockstore_venv_dir }}' + +# +# OS packages +# + +blockstore_debian_pkgs: + - libmysqlclient-dev + - libjpeg-dev + - libssl-dev + - libffi-dev + +blockstore_redhat_pkgs: [] + +BLOCKSTORE_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +BLOCKSTORE_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +BLOCKSTORE_ENABLE_ADMIN_URLS_RESTRICTION: false + +BLOCKSTORE_ADMIN_URLS: + - admin + diff --git a/playbooks/roles/blockstore/meta/main.yml b/playbooks/roles/blockstore/meta/main.yml new file mode 100644 index 00000000000..4ba3319b7f4 --- /dev/null +++ b/playbooks/roles/blockstore/meta/main.yml @@ -0,0 +1,43 @@ +--- +# Role to deploy Blockstore, the next-generation Open edX Learning Object Repository +# +# github: https://github.com/openedx/blockstore +# +## +# Role includes for role blockstore +# +dependencies: + - role: edx_django_service + edx_django_service_use_python38: "{{ BLOCKSTORE_USE_PYTHON38 }}" + edx_django_service_name: '{{ blockstore_service_name }}' + edx_django_service_user: '{{ blockstore_user }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ blockstore_service_name }}' + edx_django_service_repos: '{{ BLOCKSTORE_REPOS }}' + edx_django_service_version: '{{ BLOCKSTORE_VERSION }}' + edx_django_service_gunicorn_timeout: '{{ BLOCKSTORE_REQUEST_TIMEOUT }}' + edx_django_service_gunicorn_port: '{{ blockstore_gunicorn_port }}' + edx_django_service_gunicorn_extra: '{{ BLOCKSTORE_GUNICORN_EXTRA }}' + edx_django_service_gunicorn_workers: '{{ BLOCKSTORE_GUNICORN_WORKERS }}' + edx_django_service_gunicorn_worker_class: '{{ BLOCKSTORE_GUNICORN_WORKER_CLASS }}' + edx_django_service_gunicorn_max_requests: '{{ BLOCKSTORE_GUNICORN_MAX_REQUESTS }}' + edx_django_service_hostname: '{{ BLOCKSTORE_NGINX_HOSTNAME }}' + edx_django_service_max_webserver_upload: 10 + edx_django_service_nginx_port: '{{ BLOCKSTORE_NGINX_PORT }}' + edx_django_service_nginx_read_timeout: '{{ BLOCKSTORE_REQUEST_TIMEOUT }}' + edx_django_service_ssl_nginx_port: '{{ BLOCKSTORE_SSL_NGINX_PORT }}' + edx_django_service_default_db_name: '{{ BLOCKSTORE_DEFAULT_DB_NAME }}' + edx_django_service_db_user: '{{ BLOCKSTORE_DATABASE_USER }}' + edx_django_service_db_password: '{{ BLOCKSTORE_DATABASE_PASSWORD }}' + edx_django_service_default_db_host: '{{ BLOCKSTORE_DATABASE_HOST }}' + edx_django_service_default_db_atomic_requests: true + edx_django_service_django_settings_module: '{{ BLOCKSTORE_DJANGO_SETTINGS_MODULE }}' + edx_django_service_secret_key: '{{ BLOCKSTORE_SECRET_KEY }}' + edx_django_service_automated_users: '{{ BLOCKSTORE_AUTOMATED_USERS }}' + edx_django_service_config_overrides: '{{ BLOCKSTORE_SERVICE_CONFIG_OVERRIDES }}' + edx_django_service_environment_extra: '{{ blockstore_environment }}' + edx_django_service_debian_pkgs_extra: '{{ blockstore_debian_pkgs }}' + edx_django_service_has_static_assets: true + edx_django_service_decrypt_config_enabled: '{{ BLOCKSTORE_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ BLOCKSTORE_COPY_CONFIG_ENABLED }}' + EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ BLOCKSTORE_ENABLE_ADMIN_URLS_RESTRICTION }}' + EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ BLOCKSTORE_ADMIN_URLS }}' diff --git a/playbooks/roles/blockstore/tasks/main.yml b/playbooks/roles/blockstore/tasks/main.yml new 
file mode 100644 index 00000000000..f1f43e9197d --- /dev/null +++ b/playbooks/roles/blockstore/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# Role to deploy Blockstore, the next-generation Open edX Learning Object Repository +# +# github: https://github.com/openedx/blockstore +# +# +# Tasks for role blockstore +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# diff --git a/playbooks/roles/browsers/defaults/main.yml b/playbooks/roles/browsers/defaults/main.yml index 8cc4d8100e3..4be1a8af01c 100644 --- a/playbooks/roles/browsers/defaults/main.yml +++ b/playbooks/roles/browsers/defaults/main.yml @@ -1,22 +1,53 @@ +# We install at least one package from the multiverse (ubuntu-restricted-extras) +# Ensure that we have it enabled for machines that run the browser role. +multiverse_urls: + - 'deb http://archive.ubuntu.com/ubuntu {{ansible_distribution_release}} multiverse' + - 'deb-src http://archive.ubuntu.com/ubuntu {{ansible_distribution_release}} multiverse' + - 'deb http://archive.ubuntu.com/ubuntu {{ansible_distribution_release}}-updates multiverse' + - 'deb-src http://archive.ubuntu.com/ubuntu {{ansible_distribution_release}}-updates multiverse' + - 'deb http://archive.ubuntu.com/ubuntu {{ansible_distribution_release}}-security multiverse' + - 'deb-src http://archive.ubuntu.com/ubuntu {{ansible_distribution_release}}-security multiverse' + browser_deb_pkgs: - - xvfb - dbus-x11 - - libgconf2-4 + - gdebi + - libcurl4 + - libgconf-2-4 + - libnss3 - libxss1 - - libnss3-1d - - libcurl3 + - ubuntu-restricted-extras - xdg-utils - - gdebi + - xvfb + - wget -# Debian packages we host in S3 to ensure correct browser version -# Both Chrome and FireFox update their apt repos with the latest version, -# which often causes spurious acceptance test failures. +# Firefox for Xenial +FIREFOX_VERSION: version 59.* + +# Packages we host in S3 to ensure correct browser version. Both Chrome and +# FireFox update their apt repos with the latest version, which often causes +# spurious acceptance test failures.
browser_s3_deb_pkgs: - - { name: "google-chrome-stable_30.0.1599.114-1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_30.0.1599.114-1_amd64.deb" } - - { name: "firefox_25.0+build3-0ubuntu0.12.04.1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_25.0%2Bbuild3-0ubuntu0.12.04.1_amd64.deb" } + - name: firefox_61.0.1+build1-0ubuntu0.16.04.1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_61.0.1%2Bbuild1-0ubuntu0.16.04.1_amd64.deb + - name: google-chrome-stable_68.0.3440.84-1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_68.0.3440.84-1_amd64.deb + +trusty_browser_s3_deb_pkgs: + - name: firefox-mozilla-build_42.0-0ubuntu1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox-mozilla-build_42.0-0ubuntu1_amd64.deb + - name: google-chrome-stable_68.0.3440.84-1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_68.0.3440.84-1_amd64.deb -# Chrome and ChromeDriver -chromedriver_version: 2.6 +# GeckoDriver +geckodriver_url: "https://github.com/mozilla/geckodriver/releases/download/v0.21.0/geckodriver-v0.21.0-linux64.tar.gz" + +# ChromeDriver +chromedriver_version: 2.41 chromedriver_url: "http://chromedriver.storage.googleapis.com/%7B%7B%20chromedriver_version%20%7D%7D/chromedriver_linux64.zip" +# PhantomJS +phantomjs_version: "phantomjs-1.9.8-linux-x86_64" +phantomjs_tarfile: "{{ phantomjs_version }}.tar.bz2" +phantomjs_url: "https://bitbucket.org/ariya/phantomjs/downloads/%7B%7B%20phantomjs_tarfile%20%7D%7D" + browser_xvfb_display: ":1" diff --git a/playbooks/roles/browsers/files/geckodriver b/playbooks/roles/browsers/files/geckodriver new file mode 100644 index 00000000000..10ffb8422dd --- /dev/null +++ b/playbooks/roles/browsers/files/geckodriver @@ -0,0 +1,3 @@ +#!/bin/bash + +/usr/local/bin/geckodriver-bin "$@" --marionette-port 2828 diff --git a/playbooks/roles/browsers/tasks/main.yml b/playbooks/roles/browsers/tasks/main.yml index 471dd72df9c..124dc6d8419 100644 --- a/playbooks/roles/browsers/tasks/main.yml +++ b/playbooks/roles/browsers/tasks/main.yml @@ -1,39 +1,277 @@ # Install browsers required to run the JavaScript # and acceptance test suite locally without a display --- +- name: lock Firefox version + copy: + dest: /etc/apt/preferences.d/firefox-pinned-version + content: | + Package: firefox + Pin: {{ FIREFOX_VERSION }} + Pin-Priority: 1001 + tags: + - install + - install:system-requirements + +- name: Add multiverse repos + apt_repository: + repo: "{{item}}" + update_cache: false # only update when adding new repos + register: multiverse_installed + when: ansible_distribution == 'Ubuntu' + with_items: "{{ multiverse_urls }}" + tags: + - install + - install:system-requirements + +- name: Update cache when adding multiverse repos + apt: + update_cache: true + when: multiverse_installed.changed + tags: + - install + - install:system-requirements + - name: install system packages - apt: pkg={{','.join(browser_deb_pkgs)}} - state=present update_cache=yes + apt: + name: "{{ item }}" + update_cache: yes + with_items: "{{ browser_deb_pkgs }}" + tags: + - install + - install:system-requirements -- name: download browser debian packages from S3 - get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}" - register: download_deb - with_items: browser_s3_deb_pkgs +- name: download trusty browser packages from S3 + get_url: + dest: /tmp/{{ item.name }} + url: "{{ 
item.url }}" + register: download_trusty_deb + with_items: "{{ trusty_browser_s3_deb_pkgs }}" + when: ansible_distribution_release == 'trusty' + tags: + - install + - install:system-requirements + +- name: Add Chrome Public keys for focal + shell: wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - + when: ansible_distribution_release == 'focal' + tags: + - install + - install:system-requirements + +- name: download xenial/bionic/focal browser packages from S3 + get_url: + dest: /tmp/{{ item.name }} + url: "{{ item.url }}" + register: download_xenial_deb + with_items: "{{ browser_s3_deb_pkgs }}" + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + tags: + - install + - install:system-requirements -- name: install browser debian packages +- name: install trusty browser packages shell: gdebi -nq /tmp/{{ item.name }} - when: download_deb.changed - with_items: browser_s3_deb_pkgs + with_items: "{{ trusty_browser_s3_deb_pkgs }}" + when: download_trusty_deb.changed and + ansible_distribution_release == 'trusty' + tags: + - install + - install:system-requirements + +- name: install xenial/bionic/focal browser packages + shell: gdebi -nq /tmp/{{ item.name }} + with_items: "{{ browser_s3_deb_pkgs }}" + when: download_xenial_deb.changed and + ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + tags: + - install + - install:system-requirements + +- name: download GeckoDriver + get_url: + url: "{{ geckodriver_url }}" + dest: /tmp/geckodriver.tar.gz + register: download_geckodriver + tags: + - install + - install:system-requirements + +- name: unzip GeckoDriver tarfile + shell: tar -xvf /tmp/geckodriver.tar.gz -C /usr/local/bin/ + args: + chdir: /var/tmp + when: download_geckodriver.changed + tags: + - install + - install:system-requirements + +- name: Rename geckodriver to geckodriver-bin + command: mv /usr/local/bin/geckodriver /usr/local/bin/geckodriver-bin + tags: + - install + - install:system-requirements -- name: Install ChromeDriver +- name: make GeckoDriver binary executable + file: + path: /usr/local/bin/geckodriver-bin + mode: 0755 + when: download_geckodriver.changed + tags: + - install + - install:system-requirements + +- name: verify GeckoDriver location and mode + stat: + path: /usr/local/bin/geckodriver-bin + register: geckodriver + tags: + - install + - install:system-requirements + +- name: Add geckodriver wrapper script + copy: + src: "../files/geckodriver" + dest: "/usr/local/bin/geckodriver" + mode: 0755 + tags: + - install + - install:system-requirements + +- assert: + that: + - "geckodriver.stat.exists" + - "geckodriver.stat.mode == '0755'" + tags: + - install + - install:system-requirements + +- name: download ChromeDriver get_url: - url={{ chromedriver_url }} - dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip + url: "{{ chromedriver_url }}" + dest: /var/tmp/chromedriver_{{ chromedriver_version }}.zip + register: download_chromedriver + tags: + - install + - install:system-requirements -- name: Install ChromeDriver 2 +- name: unzip ChromeDriver tarfile shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip - chdir=/var/tmp + args: + chdir: /var/tmp + when: download_chromedriver.changed + tags: + - install + - install:system-requirements -- name: Install ChromeDriver 3 +- name: move ChromeDriver binary to /usr/local shell: mv /var/tmp/chromedriver 
/usr/local/bin/chromedriver + when: download_chromedriver.changed + tags: + - install + - install:system-requirements -- name: Install Chromedriver 4 - file: path=/usr/local/bin/chromedriver mode=0755 +- name: make ChromeDriver binary executable + file: + path: /usr/local/bin/chromedriver + mode: 0755 + when: download_chromedriver.changed + tags: + - install + - install:system-requirements +- name: verify ChromeDriver location and mode + stat: + path: /usr/local/bin/chromedriver + register: chromedriver + tags: + - install + - install:system-requirements + +- assert: + that: + - "chromedriver.stat.exists" + - "chromedriver.stat.mode == '0755'" + tags: + - install + - install:system-requirements + +- name: download PhantomJS + get_url: + url: "{{ phantomjs_url }}" + dest: "/var/tmp/{{ phantomjs_tarfile }}" + register: download_phantom_js + tags: + - install + - install:system-requirements + +- name: unpack the PhantomJS tarfile + shell: "tar -xjf /var/tmp/{{ phantomjs_tarfile }}" + args: + chdir: "/var/tmp" + when: download_phantom_js.changed + tags: + - install + - install:system-requirements + +- name: move PhantomJS binary to /usr/local + shell: mv /var/tmp/{{ phantomjs_version }}/bin/phantomjs /usr/local/bin/phantomjs + when: download_phantom_js.changed + tags: + - install + - install:system-requirements + +- name: verify phantomjs location + stat: path=/usr/local/bin/phantomjs + register: phantomjs + tags: + - install + - install:system-requirements + +- assert: + that: "phantomjs.stat.exists" + tags: + - install + - install:system-requirements + +# Systemd doesn't exist in 14.04, use upstart instead - name: create xvfb upstart script - template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root + template: + src: xvfb.conf.j2 + dest: /etc/init/xvfb.conf + owner: root + group: root + when: ansible_distribution_release == 'trusty' + tags: + - install + - install:configuration - name: start xvfb - shell: start xvfb - ignore_errors: yes + service: + name: xvfb + state: restarted + when: ansible_distribution_release == 'trusty' + tags: + - manage + - manage:start + +# Use systemd for xvfb in 16.04 +- name: create xvfb systemd service + template: + src: xvfb.service.j2 + dest: /etc/systemd/system/xvfb.service + owner: root + group: root + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + tags: + - install + - install:configuration +- name: enable and start xvfb systemd service + systemd: + name: xvfb + enabled: yes + state: started + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + tags: + - manage + - manage:start diff --git a/playbooks/roles/browsers/templates/xvfb.service.j2 b/playbooks/roles/browsers/templates/xvfb.service.j2 new file mode 100644 index 00000000000..a5d4055d4b8 --- /dev/null +++ b/playbooks/roles/browsers/templates/xvfb.service.j2 @@ -0,0 +1,9 @@ +[Unit] +Description=Xvfb X Server +After=network.target + +[Service] +ExecStart=/usr/bin/Xvfb {{ browser_xvfb_display }} -screen 0 1024x768x24 + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/cassandra/defaults/main.yml b/playbooks/roles/cassandra/defaults/main.yml new file mode 100644 index 00000000000..f8ae0fbc0ae --- /dev/null +++ b/playbooks/roles/cassandra/defaults/main.yml @@ -0,0 +1,33 @@ +--- +CASSANDRA_VERSION: "2.0.14" + +# AFAIK there's no way to detect instance storage after the instances have started.
+# Therefore, you MUST know the device names ahead of time. +# If this is empty, no disks will be mounted and data will be put on the root volume. +cassandra_ephemeral_disks: [] +#cassandra_ephemeral_disks: ["/dev/xvdb", "/dev/xvdc"] + +cassandra_data_dir_prefix: /var/lib/cassandra/data + +#should be parallel to cassandra_ephemeral_disks if there are any +cassandra_data_dirs: ["data.1"] + +#These are set by the package. Don't change them! +cassandra_user: "cassandra" +cassandra_group: "cassandra" + +# cassandra.yaml basic configuration parameters +cassandra_cluster_name: "Test Cluster" + +#Set this unless you want your node to only listen locally. +cassandra_seeds: ["127.0.0.1"] + +#should eventually use EC2Snitch +cassandra_snitch: "SimpleSnitch" + +#set this ONLY when initializing a new cluster with NO DATA +cassandra_auto_bootstrap: false + +# For single-node locally-accessible deployments only! Otherwise, use: +# cassandra_listen_address: "" +cassandra_listen_address: localhost diff --git a/playbooks/roles/cassandra/meta/main.yml b/playbooks/roles/cassandra/meta/main.yml new file mode 100644 index 00000000000..e744ff3ab53 --- /dev/null +++ b/playbooks/roles/cassandra/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - oraclejdk \ No newline at end of file diff --git a/playbooks/roles/cassandra/tasks/main.yml b/playbooks/roles/cassandra/tasks/main.yml new file mode 100644 index 00000000000..6612c72d147 --- /dev/null +++ b/playbooks/roles/cassandra/tasks/main.yml @@ -0,0 +1,83 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role cassandra +# +# Overview: +# +# This role sets up a cassandra node. 
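# A minimal example play for this role, assuming a "cassandra" host group;
# the seed IP and device name below are illustrative assumptions:
#
# - hosts: cassandra
#   become: true
#   roles:
#     - role: cassandra
#       cassandra_seeds: ["10.0.0.5"]
#       cassandra_ephemeral_disks: ["/dev/xvdb"]
#       cassandra_data_dirs: ["data.1"]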
+ +- name: Unmount disks mounted to the wrong place + mount: + name: "{{ item[0].mount }}" + src: "{{ item[0].device }}" + fstype: "{{ item[0].fstype }}" + state: unmounted + when: item[1] == item[0].device and not item[0].mount.startswith(cassandra_data_dir_prefix) + with_nested: + - ansible_mounts + - cassandra_ephemeral_disks + +- name: Create data directories + file: + path: "{{ cassandra_data_dir_prefix }}/{{ item }}" + state: directory + with_items: "{{ cassandra_data_dirs }}" + +- name: Mount ephemeral disks + mount: + name: "{{ cassandra_data_dir_prefix }}/{{ item.1 }}" + src: "{{ item.0 }}" + fstype: ext4 + state: mounted + with_together: + - cassandra_ephemeral_disks + - cassandra_data_dirs + when: cassandra_ephemeral_disks + +#Mounting a disk overlays its permissions +- name: Set permissions on data dirs + file: + path: "{{ cassandra_data_dir_prefix }}/{{ item }}" + owner: "{{ cassandra_user }}" + group: "{{ cassandra_group }}" + with_items: "{{ cassandra_data_dirs }}" + +- name: Add the datastax repository apt-key + apt_key: + url: "http://debian.datastax.com/debian/repo_key" + state: present + +- name: Add the datastax repository + apt_repository: + repo: "deb http://debian.datastax.com/community stable main" + state: present + +- name: Install the cassandra package + apt: + name: "cassandra={{ CASSANDRA_VERSION }}" + state: present + update_cache: yes + +- name: Update the cassandra configuration + template: + src: "{{item}}.j2" + dest: /etc/cassandra/{{item}} + owner: "{{cassandra_user}}" + group: "{{cassandra_group}}" + mode: 0644 + with_items: + - "cassandra.yaml" + +- name: restart cassandra + service: + name: cassandra + state: restarted diff --git a/playbooks/roles/cassandra/templates/cassandra.yaml.j2 b/playbooks/roles/cassandra/templates/cassandra.yaml.j2 new file mode 100644 index 00000000000..36df4741ba0 --- /dev/null +++ b/playbooks/roles/cassandra/templates/cassandra.yaml.j2 @@ -0,0 +1,713 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: {{ cassandra_cluster_name }} + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. 
+# hinted_handoff_enabled: DC1,DC2 +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: AllowAllAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 1000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. 
+# Cassandra will spread data evenly across them, subject to the granularity
+# of the configured compaction strategy.
+data_file_directories:
+{% for dir in cassandra_data_dirs %}
+    - {{ cassandra_data_dir_prefix }}/{{ dir }}
+{% endfor %}
+
+# commit log
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# policy for data disk failures:
+# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+#       can still be inspected via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+#              remaining available sstables. This means you WILL see obsolete
+#              data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# policy for commit disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+#       can still be inspected via JMX.
+# stop_commit: shutdown the commit log, letting writes collect but
+#              continuing to service reads, as in pre-2.0.5 Cassandra
+# ignore: ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The off-heap memory allocator. Affects storage engine metadata as
+# well as caches. Experiments show that JEMalloc uses less memory
+# than the native GCC allocator (i.e., JEMalloc is more
+# fragmentation-resistant).
+#
+# Supported values are: NativeAllocator, JEMallocAllocator
+#
+# If you intend to use JEMallocAllocator you have to install JEMalloc as a library and
+# modify cassandra-env.sh as directed in the file.
+#
+# Defaults to NativeAllocator
+# memory_allocator: NativeAllocator
+
+# saved caches
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds. By default this allows 1024*(CPU cores) pending
+# entries on the commitlog queue. If you are writing very large blobs,
+# you should reduce that; 16*cores works reasonably well for 1MB blobs.
+# It should be at least as large as the concurrent_writes setting.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+# commitlog_periodic_queue_size:
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points.
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring. You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "{{ cassandra_seeds | join(',') }}"
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. For example, a node with two
+# data drives would use 16 * 2 = 32, which matches the default below.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb (e.g.,
+# 8 * 4 cores = 32, again matching the default).
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for sstable-reading buffers. Defaults to
+# the smaller of 1/4 of heap or 512MB.
+# file_cache_size_in_mb: 512
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/4 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
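+# (For example, with the 4096 MB value suggested below and the default
+# 32 MB segments, roughly 128 segments can accumulate before the oldest
+# is flushed and removed.)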
+# So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the number of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the number of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to fsync() at intervals when doing sequential writing, in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: {{ cassandra_listen_address }}
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads; idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frames (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+
+# The address to bind the Thrift RPC service and native transport
+# server -- clients connect here.
+#
+# Leaving this blank has the same effect it does for ListenAddress
+# (i.e.
+# it will be based on the configured hostname of the node).
+#
+# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
+# here if you want to listen on all interfaces, but that will break clients
+# that rely on node auto-discovery.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+rpc_address: 0.0.0.0
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+#          per thread, and that will correspond to your use of virtual memory (but physical memory
+#          may be limited depending on use of stack space).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#          asynchronously using a small number of threads that does not vary with the number
+#          of thrift clients (and thus scales well to many clients). The rpc requests are still
+#          synchronous (one thread per active request). If hsha is selected then it is essential
+#          that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max,
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and: man tcp
+# internode_send_buff_size_in_bytes:
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#   1) a smaller granularity means more index entries are generated
+#      and looking up rows within the partition by collation column
+#      is faster
+#   2) but, Cassandra will keep the collation index in memory for hot
+#      rows (as part of the key cache), so a larger granularity means
+#      you can cache more hot rows
+column_index_size_in_kb: 64
+
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
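+# (For example, a node sustaining 1 MB/s of inserts would want roughly
+# 16 to 32 MB/s here; the default of 16 below sits at the bottom of that range.)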
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfers between datacenters;
+# this setting allows users to throttle inter-dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# inter_dc_stream_throughput_outbound_megabits_per_sec:
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operations.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This _can_ involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never times out streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks." Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+#  - SimpleSnitch:
+#    Treats Strategy order as proximity.
This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: {{ cassandra_snitch }} + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. 
+#                  Requests beyond that limit are queued up until
+#                  running requests can complete.
+#                  The value of 80 here is twice the number of
+#                  concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption.
+# Default settings are TLS v1 and RSA 1024-bit keys (it is imperative that
+# users generate their own keys), with TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
+# NOTE: No custom encryption options are enabled at the moment.
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs.
+# If set to rack, Cassandra will encrypt the traffic between the racks.
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+    # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+    # truststore: conf/.truststore
+    # truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be:  all  - all traffic is compressed
+#          dc   - traffic between different datacenters is compressed
+#          none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+# Enable or disable kernel page cache preheating from contents of the key cache after compaction.
+# When enabled it would preheat only first "page" (4KB) of each row to optimize +# for sequential access. Note: This could be harmful for fat rows, see CASSANDRA-4937 +# for further details on that topic. +preheat_kernel_page_cache: false + +auto_bootstrap: {{ cassandra_auto_bootstrap }} diff --git a/playbooks/roles/certs/defaults/main.yml b/playbooks/roles/certs/defaults/main.yml deleted file mode 100644 index 206d05e2694..00000000000 --- a/playbooks/roles/certs/defaults/main.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -## -# Defaults for role certs -# - -CERTS_QUEUE_URL: "http://localhost:18040/" -CERTS_BUCKET: "" -# basic auth credentials for connecting -# to the xqueue server -CERTS_XQUEUE_AUTH_USER: "edx" -CERTS_XQUEUE_AUTH_PASS: "edx" -# credentials for connecting to the xqueue server -CERTS_QUEUE_USER: "lms" -CERTS_QUEUE_PASS: "password" -# AWS credentials for certificate upload -CERTS_AWS_KEY: "" -CERTS_AWS_ID: "" -# GPG key ID, defaults to the dummy key -CERTS_KEY_ID: "FEF8D954" -# Path to git identity file for pull access to -# the edX certificates repo - REQUIRED -# Example - {{ secure_dir }}/files/git-identity -CERTS_LOCAL_GIT_IDENTITY: !!null -# Path to public and private gpg key for signing -# the edX certificate. Default is a dummy key -CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt" - -########## Internal role vars below - -certs_user: certs -certs_app_dir: "{{ COMMON_APP_DIR }}/certs" -certs_code_dir: "{{ certs_app_dir }}/certificates" -certs_venvs_dir: "{{ certs_app_dir }}/venvs" -certs_venv_dir: "{{ certs_venvs_dir }}/certs" -certs_venv_bin: "{{ certs_venv_dir }}/bin" -certs_git_ssh: /tmp/git_ssh.sh -certs_git_identity: "{{ certs_app_dir }}/git-identity" -certs_requirements_file: "{{ certs_code_dir }}/requirements.txt" -certs_repo: "git@github.com:/edx/certificates" -certs_version: 'master' -certs_gpg_dir: "{{ certs_app_dir }}/gnupg" -certs_env_config: - # CERTS_DATA is legacy, not used - CERT_DATA: {} - QUEUE_NAME: "certificates" - QUEUE_URL: $CERTS_QUEUE_URL - CERT_BUCKET: $CERTS_BUCKET - # gnupg signing key - CERT_KEY_ID: $CERTS_KEY_ID - LOGGING_ENV: "" - CERT_GPG_DIR: $certs_gpg_dir - -certs_auth_config: - QUEUE_USER: $CERTS_QUEUE_USER - QUEUE_PASS: $CERTS_QUEUE_PASS - QUEUE_AUTH_USER: $CERTS_XQUEUE_AUTH_USER - QUEUE_AUTH_PASS: $CERTS_XQUEUE_AUTH_PASS - CERT_KEY_ID: $CERTS_KEY_ID - CERT_AWS_ID: $CERTS_AWS_ID - CERT_AWS_KEY: $CERTS_AWS_KEY diff --git a/playbooks/roles/certs/files/example-private-key.txt b/playbooks/roles/certs/files/example-private-key.txt deleted file mode 100644 index 3ab8accbc3b..00000000000 --- a/playbooks/roles/certs/files/example-private-key.txt +++ /dev/null @@ -1,57 +0,0 @@ ------BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1.4.11 (GNU/Linux) - -lQOYBFJwVOkBCAC4heT6+P1sGgITAB5C+hKNr4RACS47K1nxgIiEqiFMIycluDmM -4kdqFInzDK8GHF2W5KijZmYf7LrWIg4+PmnyYAB7cO+eJUDfTE7n7bjGQL3LohJN -FTlRsXKOKGWoBqlytE3D16lQIIp0JkqB9sHO3Y9yOgEsSy3cMWKtT8U6qx40xV+e -t0FYmqL7pBE7OFfvCIe7+kthsTqFys/jkRNFvbSo5fjA1m9ubjEJqqfnhuvLaL5O -YHGe1nKQRLi45gmZ1JYvxfZrWUO2BeulNY/mvAFQnRNRRiWfM3Ic4Ya9Wv62wS3p -dYY4HEtDQDyKpOkJ2R31+1FhZYIKJTYR89jxABEBAAEAB/wOApyQMbeMLa1ao/eo -PjSKbXktI4VPGMuLeqbi68f7b+/Y/VPhToz9kPGocp4XaK/ydQoY3f2DDwZgm9VZ 
-BIQm0wM2XCzVZR631aNoGLSe2OuQOo4JLENd4ItCH+8YAul6vBXreMRyQQZCK2Yc -2A9/FXN+yMiuBEdHILjNT/E5swNm0J85YlXpIW6Jm3aR6OjzfFS/j+7AEDSL5MZX -JotfGjYXuC1MOw5YJZKWkQBz+5IaVceOd9s8TlZFq/eYrN5sqAWh06CBY+Zye3fg -/WiWFUdTgpG81lbAXGxHrjQ5f22saOzkbv0FjdEfx1M9Wcj6OAIRXI7k8EkZJia1 -IYEBBADYRvRE2zyR5M72MfCX1XUOpx/9OZCrVsYoKqp1BORXjt58Szs7UFdeAXE+ -bPzbpcjENiVYVjoeQKCNTU78gzjD+NzkfTdsF7rrvXObo6NpChTCOdQfg56Ll+3y -3nUDKIcFXsYP1NIC2SL0APcpUtDLPIWb0XRnlvBQlakmnyb7bQQA2mnn15LVRK0J -1wYZiSwrRIcE7X+zy6t8iERr+E0jIyQQV4vaOYItCDTP8fzNiiX10Nkt5imRqML3 -NBPs0jInjmYxMmzvjVxyUDv4rGGbiXXeh+W1v/mweMH2pbiItjhyeSVt4U2l6GKI -Ob/K+khx1ftfOTktLTZVMPg6NzPRfRUD/jyLL92V7eoHshreqFIUcBmUdQnMVyz2 -NBhci7RHGn3J84TTMCXBJL8MLUu9PKxfFcZjw1Wet8avX5oPjgl3OiQpjjx69sO5 -S/UWpGaEOrz87j4VRGPb6zmegkZs44sQEfwhDJk6O1eQ5dYzniRC46nzMpETFfIF -U8m8bJrrus4HPEy0HmV4YW1wbGUga2V5IDx0ZXN0QGV4YW1wbGUuY29tPokBOAQT -AQIAIgUCUnBU6QIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQBECJtv74 -2VSWKQf/f0PWmbGxdiBIK5gf2pmaK0aDDM09v7vedysIn/URnj26BMN/YEyDYnZS -BN+iuU6VartvEYlNeYiRAnaG/6gl7DJh2l2X/iuDn0xKT0GjqDpjh7n6964OKAz7 -RHWADXqsr5BWms1EPFtDVnAJfN2A3cxTeA5vUUl41WvCJQa7L8Bw7SezkS0yn8Rn -u4icNKyew7TrFofIydws6LTM1DhHpCB32z6b7HHt85OOzpuUm07HP83S59lxBp6x -x3NH9AH/WPeXiS5QRh1jP6qzUAHoHQpsV2XonmC4JXl+ZFxNyZeJ000ldDFfEHrO -RLg3d5GkZ1pDVDn3HlZ+SKqYilRXCJ0DmARScFTpAQgA1KGTRGcqwla5/VOuHwxw -ABpLYdpsetYoOORjJQvHakG8QBchxsJVniBijD09gFmHYpdSJaeHnvqkeHGO1fJa -E4QxS4AYt/HVoi86RhBLD/Gr0/DWC/0XUV5613PSmWkYCCTgWLaxT9MpPjtGVd4v -L6Iv/d8Go/Wrq55zCl82PTA7ao4PxSSxlforfZOZqsJ/pzjCRkF6Z7co+LO24KSl -Lt4iN2vwJ2VhvOrMFuV91WQeEJWdTX+yx035eU/MFu9u243CE0UGNzWHjYLpgBxl -Pg0W5GFRZM/LYkXAfHAM4/Ic2ex2LQ0RLiH4i0FbzoSwjvz586v2Sagc5nsYMoGu -gQARAQABAAf/W6W+23taJ0SJSuLACJrsRWcP+b/TBQj8cjUidKvEioyFztwJj2lg -zNSplUeqFAHCxGBzpE42uvPOYymTBq08XPAb7S5ruREP4yVXCS7po5gnVyUVpToz -zDscWHQQIFZ3aL84QZSRDVZ3Dt8unEE1dmMCK3rvGkl/8mtLq3tJXgp7/wdsK4G0 -3AuJVQ918XlozNTayGfdCPhWicE7fv5peUlWRWlSuSNmTrHiAbysd2xwXnMq/OdQ -Q+z7ogQHhUvQQ+31msGlcCJQqqWr250/HBrTATrRJNIVvvzCgpw3/6r99MNwlSWV -ZhDotwf745fdzZiwdgJ04nhEj9QmKynKXwQA29p7DmMWMZg61qU481YNWgc5RMjL -ADUS2iC5nr/Y5HuAsGWj9ZkDvRXKSyexkZ+OXi6EonGCGjCNONPaB8JWRO7LssEc -VG+lPp4mwASE38cjfFy7DdEGpxn3eZPDsNwv7vnWhNyGSh8FXKoYXyZiJ6F3zvkU -aWwfaTtxVplfn88EAPeXHLkwl+D8zkk8ILYnsJKEKjcqUwiQ6L7JMEhd+GVo5xR+ -WUDdhnmEkH/QZZt8zTpYL3Hl3JsqQYidq0uzy39qg+cVvD9yJkHP4KMAqe9QcRYR -eQvpopMYt5va7pyaebZbpxfP9M7Y2/5VT59GBO6uHy4CMR1uM4Z50QA0kZCvA/oD -D/9qEaWzwqLtXjN1iRxOv7ioor8ExvA/8HY8xtCsCLuFuo9P44xtYzSCzLdoOYCE -4Lrn7DeE2hXEoq/2VEWoRS4+kU1vUBIJEAxfHk6HozA0apFPqm8ODH44s68VRTce -pGwORxsFhMHw1/m5A1RBZF8UP7VXFxluYuwx6S5NyjSfiQEfBBgBAgAJBQJScFTp -AhsMAAoJEARAibb++NlUojkIAKHwS1VSeW6fgWv7H2qaTjdMeNG7vXUYKUE7KMpQ -UmvdHobMfbO9SEgihwG+WdgPy96RlYx5PuVfeWkPVdVsbrU9BuR+9qdYyGGH4FvP -qAaruT3dFLRFvDj/ta94gDFGCH1LrtGI/t78wjjIEd8QOGIj+8Uo1Z6HKExSsNuG -+8usut6je50a2CsAyoZtrPmybZdkU6eOuM5ZSGDpgfTlFNpeK3sf7CTnYA5NTLPC -wWbyCxUb7EUrch+StmJWsIzS4mClMd6nB4480FwwhGbdFejSF20z64c6hbxuwgfS -nyXklWktEX0d5T7wdAi+UOvNsdoigzUMWpBoo07VOlzjMFU= -=iNqX ------END PGP PRIVATE KEY BLOCK----- diff --git a/playbooks/roles/certs/files/example-public-key.txt b/playbooks/roles/certs/files/example-public-key.txt deleted file mode 100644 index e1c620bcfe6..00000000000 --- a/playbooks/roles/certs/files/example-public-key.txt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.11 (GNU/Linux) - -mQENBFJwVOkBCAC4heT6+P1sGgITAB5C+hKNr4RACS47K1nxgIiEqiFMIycluDmM -4kdqFInzDK8GHF2W5KijZmYf7LrWIg4+PmnyYAB7cO+eJUDfTE7n7bjGQL3LohJN -FTlRsXKOKGWoBqlytE3D16lQIIp0JkqB9sHO3Y9yOgEsSy3cMWKtT8U6qx40xV+e 
-t0FYmqL7pBE7OFfvCIe7+kthsTqFys/jkRNFvbSo5fjA1m9ubjEJqqfnhuvLaL5O -YHGe1nKQRLi45gmZ1JYvxfZrWUO2BeulNY/mvAFQnRNRRiWfM3Ic4Ya9Wv62wS3p -dYY4HEtDQDyKpOkJ2R31+1FhZYIKJTYR89jxABEBAAG0HmV4YW1wbGUga2V5IDx0 -ZXN0QGV4YW1wbGUuY29tPokBOAQTAQIAIgUCUnBU6QIbAwYLCQgHAwIGFQgCCQoL -BBYCAwECHgECF4AACgkQBECJtv742VSWKQf/f0PWmbGxdiBIK5gf2pmaK0aDDM09 -v7vedysIn/URnj26BMN/YEyDYnZSBN+iuU6VartvEYlNeYiRAnaG/6gl7DJh2l2X -/iuDn0xKT0GjqDpjh7n6964OKAz7RHWADXqsr5BWms1EPFtDVnAJfN2A3cxTeA5v -UUl41WvCJQa7L8Bw7SezkS0yn8Rnu4icNKyew7TrFofIydws6LTM1DhHpCB32z6b -7HHt85OOzpuUm07HP83S59lxBp6xx3NH9AH/WPeXiS5QRh1jP6qzUAHoHQpsV2Xo -nmC4JXl+ZFxNyZeJ000ldDFfEHrORLg3d5GkZ1pDVDn3HlZ+SKqYilRXCLkBDQRS -cFTpAQgA1KGTRGcqwla5/VOuHwxwABpLYdpsetYoOORjJQvHakG8QBchxsJVniBi -jD09gFmHYpdSJaeHnvqkeHGO1fJaE4QxS4AYt/HVoi86RhBLD/Gr0/DWC/0XUV56 -13PSmWkYCCTgWLaxT9MpPjtGVd4vL6Iv/d8Go/Wrq55zCl82PTA7ao4PxSSxlfor -fZOZqsJ/pzjCRkF6Z7co+LO24KSlLt4iN2vwJ2VhvOrMFuV91WQeEJWdTX+yx035 -eU/MFu9u243CE0UGNzWHjYLpgBxlPg0W5GFRZM/LYkXAfHAM4/Ic2ex2LQ0RLiH4 -i0FbzoSwjvz586v2Sagc5nsYMoGugQARAQABiQEfBBgBAgAJBQJScFTpAhsMAAoJ -EARAibb++NlUojkIAKHwS1VSeW6fgWv7H2qaTjdMeNG7vXUYKUE7KMpQUmvdHobM -fbO9SEgihwG+WdgPy96RlYx5PuVfeWkPVdVsbrU9BuR+9qdYyGGH4FvPqAaruT3d -FLRFvDj/ta94gDFGCH1LrtGI/t78wjjIEd8QOGIj+8Uo1Z6HKExSsNuG+8usut6j -e50a2CsAyoZtrPmybZdkU6eOuM5ZSGDpgfTlFNpeK3sf7CTnYA5NTLPCwWbyCxUb -7EUrch+StmJWsIzS4mClMd6nB4480FwwhGbdFejSF20z64c6hbxuwgfSnyXklWkt -EX0d5T7wdAi+UOvNsdoigzUMWpBoo07VOlzjMFU= -=WP59 ------END PGP PUBLIC KEY BLOCK----- diff --git a/playbooks/roles/certs/handlers/main.yml b/playbooks/roles/certs/handlers/main.yml deleted file mode 100644 index b15021fa224..00000000000 --- a/playbooks/roles/certs/handlers/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role certs -# -# Overview: -# - -- name: restart certs - supervisorctl_local: > - name=certs - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - when: certs_installed is defined diff --git a/playbooks/roles/certs/meta/main.yml b/playbooks/roles/certs/meta/main.yml deleted file mode 100644 index 39634fb994d..00000000000 --- a/playbooks/roles/certs/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - supervisor diff --git a/playbooks/roles/certs/tasks/deploy.yml b/playbooks/roles/certs/tasks/deploy.yml deleted file mode 100644 index 228b045b225..00000000000 --- a/playbooks/roles/certs/tasks/deploy.yml +++ /dev/null @@ -1,85 +0,0 @@ ---- - -- name: create certificate application config - template: > - src=certs.env.json.j2 - dest={{ certs_app_dir }}/env.json - sudo_user: "{{ certs_user }}" - notify: restart certs - -- name: create certificate auth file - template: > - src=certs.auth.json.j2 - dest={{ certs_app_dir }}/auth.json - sudo_user: "{{ certs_user }}" - notify: restart certs - -- name: writing supervisor script for certificates - template: > - src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf - owner={{ supervisor_user }} mode=0644 - notify: restart certs - -- name: create ssh script for git - template: > - src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }} - owner={{ certs_user }} mode=750 - notify: restart certs - -- stat: path={{ CERTS_LOCAL_GIT_IDENTITY }} - register: certs_identity - -- name: install read-only ssh key for the certs repo - 
copy: > - src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }} - force=yes owner={{ certs_user }} mode=0600 - notify: restart certs - when: certs_identity.stat.exists - -- name: checkout certificates repo into {{ certs_code_dir }} - git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }} - sudo_user: "{{ certs_user }}" - environment: - GIT_SSH: "{{ certs_git_ssh }}" - notify: restart certs - when: certs_identity.stat.exists - -- name: remove read-only ssh key for the certs repo - file: path={{ certs_git_identity }} state=absent - notify: restart certs - when: certs_identity.stat.exists - -- name : install python requirements - pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present - sudo_user: "{{ certs_user }}" - notify: restart certs - - # call supervisorctl update. this reloads - # the supervisorctl config and restarts - # the services if any of the configurations - # have changed. - # -- name: update supervisor configuration - shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" - register: supervisor_update - sudo_user: "{{ supervisor_service_user }}" - changed_when: supervisor_update.stdout != "" - -- name: ensure certs has started - supervisorctl_local: > - name=certs - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - sudo_user: "{{ supervisor_service_user }}" - -- name: create a symlink for venv python - file: > - src="http://github.com/%7B%7B%20certs_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.certs - state=link - with_items: - - python - - pip - -- set_fact: certs_installed=true diff --git a/playbooks/roles/certs/tasks/main.yml b/playbooks/roles/certs/tasks/main.yml deleted file mode 100644 index 8d7640029f2..00000000000 --- a/playbooks/roles/certs/tasks/main.yml +++ /dev/null @@ -1,80 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role certs -# -# Overview: -# -# Installs the edX certificate server. -# -# The certificates repo is currently *not* public -# due to sensitive information in it, it may be made -# public in the future. -# -# Dependencies: -# - common -# - supervisor -# -# -# Example play: -# -# - roles: -# - common -# - supervisor -# - certs -# -- name: Checking to see if git identity is set - fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!" 
- when: not CERTS_LOCAL_GIT_IDENTITY - -- name: create application user - user: > - name="{{ certs_user }}" - home="{{ certs_app_dir }}" - createhome=no - shell=/bin/false - notify: restart certs - -- name: create certs app and data dirs - file: > - path="{{ item }}" - state=directory - owner="{{ certs_user }}" - group="{{ common_web_group }}" - notify: restart certs - with_items: - - "{{ certs_app_dir }}" - - "{{ certs_venvs_dir }}" - -- name: create certs gpg dir - file: > - path="{{ certs_gpg_dir }}" state=directory - owner="{{ common_web_user }}" - mode=0700 - notify: restart certs - -- name: copy the private gpg signing key - copy: > - src={{ CERTS_LOCAL_PRIVATE_KEY }} - dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }} - owner={{ common_web_user }} mode=0600 - notify: restart certs - register: certs_gpg_key - - -- name: load the gpg key - shell: > - /usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }} - sudo_user: "{{ common_web_user }}" - when: certs_gpg_key.changed - notify: restart certs - -- include: deploy.yml tags=deploy diff --git a/playbooks/roles/certs/templates/certs.auth.json.j2 b/playbooks/roles/certs/templates/certs.auth.json.j2 deleted file mode 100644 index 339bc104354..00000000000 --- a/playbooks/roles/certs/templates/certs.auth.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ certs_auth_config | to_nice_json }} diff --git a/playbooks/roles/certs/templates/certs.conf.j2 b/playbooks/roles/certs/templates/certs.conf.j2 deleted file mode 100644 index 0d15a089305..00000000000 --- a/playbooks/roles/certs/templates/certs.conf.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[program:certs] -command={{ certs_venv_bin }}/python {{ certs_code_dir }}/certificate_agent.py -priority=999 -environment=SERVICE_VARIANT="certs",HOME="/" -user={{ common_web_user }} -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/certs/templates/certs.env.json.j2 b/playbooks/roles/certs/templates/certs.env.json.j2 deleted file mode 100644 index d53303433d3..00000000000 --- a/playbooks/roles/certs/templates/certs.env.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ certs_env_config | to_nice_json }} diff --git a/playbooks/roles/certs/templates/git_ssh.sh.j2 b/playbooks/roles/certs/templates/git_ssh.sh.j2 deleted file mode 100644 index 8ecdc9cb89d..00000000000 --- a/playbooks/roles/certs/templates/git_ssh.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ certs_git_identity }} "$@" diff --git a/playbooks/roles/codejail/defaults/main.yml b/playbooks/roles/codejail/defaults/main.yml new file mode 100644 index 00000000000..b2e859955a0 --- /dev/null +++ b/playbooks/roles/codejail/defaults/main.yml @@ -0,0 +1,10 @@ +--- +codejail_debian_packages: + - apparmor-utils +CODEJAIL_PYTHON_VERSIONS: + - python3.8 +codejail_sandbox_user: 'sandbox' +codejail_sandbox_group: 'sandbox' +codejail_sandbox_name_base: 'codejail_sandbox' +codejail_sandbox_env: '/home/{{ codejail_sandbox_user }}/{{ codejail_sandbox_name_base }}' +codejail_sandbox_caller: 'ubuntu' diff --git a/playbooks/roles/codejail/tasks/main.yml b/playbooks/roles/codejail/tasks/main.yml new file mode 100644 index 00000000000..87ac196986c --- /dev/null +++ b/playbooks/roles/codejail/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: Install codejail specific system packages + apt: + name: '{{ item }}' + state: present + 
update_cache: yes
+  with_items: '{{ codejail_debian_packages }}'
+- name: Create group for sandbox user
+  group:
+    name: '{{ codejail_sandbox_group }}'
+    state: present
+    system: yes
+- name: Create sandbox user
+  user:
+    name: '{{ codejail_sandbox_user }}'
+    group: '{{ codejail_sandbox_group }}'
+    state: present
+- name: Create sandboxed virtual environments for every Python installation
+  shell: "virtualenv -p {{ item }} --always-copy {{ codejail_sandbox_env }}-{{ item }}"
+  become: true
+  with_items: "{{ CODEJAIL_PYTHON_VERSIONS }}"
+- name: Clone codejail repo
+  git:
+    repo: 'https://github.com/openedx/codejail.git'
+    dest: '/tmp/codejail'
+    version: 'master'
+- name: Install codejail sandbox dependencies
+  pip:
+    requirements: '/tmp/codejail/requirements/sandbox.txt'
+    virtualenv: "{{ codejail_sandbox_env }}-{{ item }}"
+    state: present
+  become: true
+  with_items: "{{ CODEJAIL_PYTHON_VERSIONS }}"
+- name: Set permissions for sandboxed Python environments
+  file:
+    path: '{{ codejail_sandbox_env }}-{{ item }}'
+    recurse: yes
+    owner: '{{ codejail_sandbox_user }}'
+    group: '{{ codejail_sandbox_group }}'
+  with_items: "{{ CODEJAIL_PYTHON_VERSIONS }}"
+  become: true
+- name: Template sudoers file
+  template:
+    src: "sudoers-template"
+    dest: "/etc/sudoers.d/01-sandbox"
+- name: Create AppArmor profiles for each Python installation
+  template:
+    src: "apparmor-template"
+    dest: '/etc/apparmor.d/home.{{ codejail_sandbox_user }}.{{ codejail_sandbox_name_base }}-{{ item }}.bin.python'
+  with_items: "{{ CODEJAIL_PYTHON_VERSIONS }}"
+- name: Parse AppArmor profiles
+  shell: 'apparmor_parser /etc/apparmor.d/home.{{ codejail_sandbox_user }}.{{ codejail_sandbox_name_base }}-{{ item }}.bin.python'
+  become: true
+  with_items: "{{ CODEJAIL_PYTHON_VERSIONS }}"
+- name: Enforce AppArmor profiles
+  shell: 'aa-enforce /etc/apparmor.d/home.{{ codejail_sandbox_user }}.{{ codejail_sandbox_name_base }}-{{ item }}.bin.python'
+  become: true
+  with_items: "{{ CODEJAIL_PYTHON_VERSIONS }}"
diff --git a/playbooks/roles/codejail/templates/apparmor-template b/playbooks/roles/codejail/templates/apparmor-template
new file mode 100644
index 00000000000..a7f4fb7d695
--- /dev/null
+++ b/playbooks/roles/codejail/templates/apparmor-template
@@ -0,0 +1,27 @@
+#include <tunables/global>
+
+{{ codejail_sandbox_env }}-{{ item }}/bin/python {
+    #include <abstractions/base>
+    #include <abstractions/python>
+
+    {{ codejail_sandbox_env }}-{{ item }}/** mr,
+    /tmp/codejail-*/ rix,
+    /tmp/codejail-*/** wrix,
+
+    # Whitelist particular shared objects from the system
+    # python installation
+    #
+    /usr/lib/{{ item }}/lib-dynload/_json.so mr,
+    /usr/lib/{{ item }}/lib-dynload/_ctypes.so mr,
+    /usr/lib/{{ item }}/lib-dynload/_heapq.so mr,
+    /usr/lib/{{ item }}/lib-dynload/_io.so mr,
+    /usr/lib/{{ item }}/lib-dynload/_csv.so mr,
+    /usr/lib/{{ item }}/lib-dynload/datetime.so mr,
+    /usr/lib/{{ item }}/lib-dynload/_elementtree.so mr,
+    /usr/lib/{{ item }}/lib-dynload/pyexpat.so mr,
+    /usr/lib/{{ item }}/lib-dynload/future_builtins.so mr,
+    #
+    # Allow access to selections from /proc
+    #
+    /proc/*/mounts r,
+}
diff --git a/playbooks/roles/codejail/templates/sudoers-template b/playbooks/roles/codejail/templates/sudoers-template
new file mode 100644
index 00000000000..27d4cc154ed
--- /dev/null
+++ b/playbooks/roles/codejail/templates/sudoers-template
@@ -0,0 +1,11 @@
+{% for python_version in CODEJAIL_PYTHON_VERSIONS %}
+{{ codejail_sandbox_caller }} ALL=({{ codejail_sandbox_user }}) SETENV:NOPASSWD:{{ codejail_sandbox_env }}-{{ python_version }}/bin/python
+{% endfor %}
+{{
codejail_sandbox_caller }} ALL=({{ codejail_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find +{{ codejail_sandbox_caller }} ALL=(ALL) NOPASSWD:/usr/bin/pkill + +{% for python_version in CODEJAIL_PYTHON_VERSIONS %} +Defaults!{{ codejail_sandbox_env }}-{{ python_version }}/bin/python !requiretty +{% endfor %} +Defaults!/usr/bin/find !requiretty +Defaults!/usr/bin/pkill !requiretty diff --git a/playbooks/roles/commerce_coordinator/defaults/main.yml b/playbooks/roles/commerce_coordinator/defaults/main.yml new file mode 100644 index 00000000000..2324e1af4e2 --- /dev/null +++ b/playbooks/roles/commerce_coordinator/defaults/main.yml @@ -0,0 +1,168 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role commerce_coordinator +# + +COMMERCE_COORDINATOR_GIT_IDENTITY: !!null + +COMMERCE_COORDINATOR_ENABLED: True + +# +# vars are namespace with the module name. +# +commerce_coordinator_service_name: 'commerce_coordinator' + +commerce_coordinator_user: "{{ commerce_coordinator_service_name }}" +commerce_coordinator_home: "{{ COMMON_APP_DIR }}/{{ commerce_coordinator_service_name }}" +commerce_coordinator_app_dir: "{{ COMMON_APP_DIR }}/{{ commerce_coordinator_service_name }}" +commerce_coordinator_code_dir: "{{ commerce_coordinator_app_dir }}/{{ commerce_coordinator_service_name }}" +commerce_coordinator_venvs_dir: "{{ commerce_coordinator_app_dir }}/venvs" +commerce_coordinator_venv_dir: "{{ commerce_coordinator_venvs_dir }}/commerce_coordinator" +commerce_coordinator_celery_default_queue: 'commerce_coordinator.default' +commerce_coordinator_hostname: "commerce-coordinator" + +COMMERCE_COORDINATOR_USE_PYTHON38: True + +COMMERCE_COORDINATOR_CELERY_ALWAYS_EAGER: false +COMMERCE_COORDINATOR_CELERY_BROKER_TRANSPORT: '' +COMMERCE_COORDINATOR_CELERY_BROKER_USER: '' +COMMERCE_COORDINATOR_CELERY_BROKER_PASSWORD: '' +COMMERCE_COORDINATOR_CELERY_BROKER_HOSTNAME: '' +COMMERCE_COORDINATOR_CELERY_BROKER_VHOST: '' + +commerce_coordinator_environment: + COMMERCE_COORDINATOR_CFG: '{{ COMMON_CFG_DIR }}/{{ commerce_coordinator_service_name }}.yml' + +commerce_coordinator_gunicorn_port: 18170 + +commerce_coordinator_debian_pkgs: [] + +COMMERCE_COORDINATOR_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: 'commerce-coordinator.git' + VERSION: '{{ COMMERCE_COORDINATOR_VERSION }}' + DESTINATION: "{{ commerce_coordinator_code_dir }}" + SSH_KEY: '{{ COMMERCE_COORDINATOR_GIT_IDENTITY }}' + +COMMERCE_COORDINATOR_NGINX_PORT: '1{{ commerce_coordinator_gunicorn_port }}' +COMMERCE_COORDINATOR_SSL_NGINX_PORT: '4{{ commerce_coordinator_gunicorn_port }}' + +COMMERCE_COORDINATOR_DEFAULT_DB_NAME: 'commerce-coordinator' +COMMERCE_COORDINATOR_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +COMMERCE_COORDINATOR_MYSQL_USER: 'commerce-coordinator001' +COMMERCE_COORDINATOR_MYSQL_PASSWORD: 'password' +COMMERCE_COORDINATOR_MYSQL_CONN_MAX_AGE: 60 + +COMMERCE_COORDINATOR_MEMCACHE: [ 'memcache' ] + +COMMERCE_COORDINATOR_DJANGO_SETTINGS_MODULE: 'commerce_coordinator.settings.production' +COMMERCE_COORDINATOR_DOMAIN: 'localhost' +COMMERCE_COORDINATOR_URL_ROOT: 'http://{{ COMMERCE_COORDINATOR_DOMAIN }}:{{ COMMERCE_COORDINATOR_NGINX_PORT }}' +COMMERCE_COORDINATOR_API_ROOT: 
'{{ COMMERCE_COORDINATOR_URL_ROOT }}/api' +COMMERCE_COORDINATOR_LOGOUT_URL: '{{ COMMERCE_COORDINATOR_URL_ROOT }}/logout/' + +COMMERCE_COORDINATOR_LANG: 'en_US.UTF-8' +COMMERCE_COORDINATOR_LANGUAGE_CODE: 'en' +COMMERCE_COORDINATOR_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +COMMERCE_COORDINATOR_SERVICE_USER: 'commerce_coordinator_service_user' + +COMMERCE_COORDINATOR_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ commerce_coordinator_service_name }}' +COMMERCE_COORDINATOR_MEDIA_ROOT: '{{ COMMERCE_COORDINATOR_DATA_DIR }}/media' +COMMERCE_COORDINATOR_MEDIA_URL: '/api/media/' + +COMMERCE_COORDINATOR_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ COMMERCE_COORDINATOR_MEDIA_ROOT }}' + MEDIA_URL: '{{ COMMERCE_COORDINATOR_MEDIA_URL }}' + +# TODO: Let edx_django_service manage COMMERCE_COORDINATOR_STATIC_ROOT in phase 2. +COMMERCE_COORDINATOR_STATIC_ROOT: '{{ COMMERCE_COORDINATOR_DATA_DIR }}/staticfiles' +COMMERCE_COORDINATOR_STATIC_URL: '/static/' + +COMMERCE_COORDINATOR_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +COMMERCE_COORDINATOR_CORS_ORIGIN_ALLOW_ALL: false +COMMERCE_COORDINATOR_CORS_ORIGIN_WHITELIST: [] + +COMMERCE_COORDINATOR_CSRF_COOKIE_SECURE: false +COMMERCE_COORDINATOR_CSRF_TRUSTED_ORIGINS: [] + +COMMERCE_COORDINATOR_VERSION: 'main' + +COMMERCE_COORDINATOR_GUNICORN_EXTRA: '' + +COMMERCE_COORDINATOR_EXTRA_APPS: [] + +COMMERCE_COORDINATOR_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +COMMERCE_COORDINATOR_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +# Used to automatically configure OAuth2 Client +COMMERCE_COORDINATOR_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'commerce_coordinator-sso-key' +COMMERCE_COORDINATOR_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'commerce_coordinator-sso-secret' +COMMERCE_COORDINATOR_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'commerce_coordinator-backend-service-key' +COMMERCE_COORDINATOR_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'commerce_coordinator-backend-service-secret' +COMMERCE_COORDINATOR_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +# API key for segment.io +COMMERCE_COORDINATOR_SEGMENT_KEY: !!null + +COMMERCE_COORDINATOR_DISCOVERY_BASE_URL: !!null +COMMERCE_COORDINATOR_LMS_BASE_URL: !!null + +commerce_coordinator_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ COMMERCE_COORDINATOR_CERTIFICATE_LANGUAGES }}' + COMMERCE_COORDINATOR_SERVICE_USER: '{{ COMMERCE_COORDINATOR_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ COMMERCE_COORDINATOR_LANGUAGE_COOKIE_NAME }}' + SEGMENT_KEY: "{{ COMMERCE_COORDINATOR_SEGMENT_KEY }}" + DISCOVERY_BASE_URL: "{{ COMMERCE_COORDINATOR_DISCOVERY_BASE_URL }}" + LMS_BASE_URL: "{{ COMMERCE_COORDINATOR_LMS_BASE_URL }}" + CORS_ORIGIN_WHITELIST: "{{ COMMERCE_COORDINATOR_CORS_ORIGIN_WHITELIST }}" + CSRF_TRUSTED_ORIGINS: "{{ COMMERCE_COORDINATOR_CSRF_TRUSTED_ORIGINS }}" + CSRF_COOKIE_SECURE: "{{ COMMERCE_COORDINATOR_CSRF_COOKIE_SECURE }}" + CELERY_ALWAYS_EAGER: '{{ COMMERCE_COORDINATOR_CELERY_ALWAYS_EAGER }}' + CELERY_BROKER_TRANSPORT: '{{ COMMERCE_COORDINATOR_CELERY_BROKER_TRANSPORT }}' + CELERY_BROKER_USER: '{{ COMMERCE_COORDINATOR_CELERY_BROKER_USER }}' + CELERY_BROKER_PASSWORD: '{{ COMMERCE_COORDINATOR_CELERY_BROKER_PASSWORD }}' + CELERY_BROKER_HOSTNAME: '{{ COMMERCE_COORDINATOR_CELERY_BROKER_HOSTNAME }}' + CELERY_BROKER_VHOST: '{{ COMMERCE_COORDINATOR_CELERY_BROKER_VHOST }}' + CELERY_DEFAULT_EXCHANGE: 'commerce_coordinator' + CELERY_DEFAULT_ROUTING_KEY: 'commerce_coordinator' + CELERY_DEFAULT_QUEUE: '{{ commerce_coordinator_celery_default_queue }}' + +# See 
edx_django_service_automated_users for an example of what this should be +COMMERCE_COORDINATOR_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. for sandboxes) + +COMMERCE_COORDINATOR_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +COMMERCE_COORDINATOR_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +COMMERCE_COORDINATOR_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +COMMERCE_COORDINATOR_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# Worker settings +worker_django_settings_module: "{{ COMMERCE_COORDINATOR_DJANGO_SETTINGS_MODULE }}" +COMMERCE_COORDINATOR_CELERY_WORKERS: + - queue: '{{ commerce_coordinator_celery_default_queue }}' + concurrency: 1 + monitor: True +commerce_coordinator_workers: "{{ COMMERCE_COORDINATOR_CELERY_WORKERS }}" + +commerce_coordinator_post_migrate_commands: [] diff --git a/playbooks/roles/commerce_coordinator/meta/main.yml b/playbooks/roles/commerce_coordinator/meta/main.yml new file mode 100644 index 00000000000..9f702ec6726 --- /dev/null +++ b/playbooks/roles/commerce_coordinator/meta/main.yml @@ -0,0 +1,56 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role commerce_coordinator +# +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ COMMERCE_COORDINATOR_USE_PYTHON38 }}' + edx_django_service_version: '{{ COMMERCE_COORDINATOR_VERSION }}' + edx_django_service_name: '{{ commerce_coordinator_service_name }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ commerce_coordinator_service_name }}' + edx_django_service_user: '{{ commerce_coordinator_user }}' + edx_django_service_config_overrides: '{{ commerce_coordinator_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ commerce_coordinator_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ commerce_coordinator_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ COMMERCE_COORDINATOR_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ commerce_coordinator_environment }}' + edx_django_service_gunicorn_extra: '{{ COMMERCE_COORDINATOR_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ COMMERCE_COORDINATOR_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ COMMERCE_COORDINATOR_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ COMMERCE_COORDINATOR_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ COMMERCE_COORDINATOR_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ COMMERCE_COORDINATOR_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ COMMERCE_COORDINATOR_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ COMMERCE_COORDINATOR_MEMCACHE }}' + edx_django_service_default_db_host: '{{ COMMERCE_COORDINATOR_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ COMMERCE_COORDINATOR_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ COMMERCE_COORDINATOR_MYSQL_USER }}' + edx_django_service_db_password: '{{ COMMERCE_COORDINATOR_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ COMMERCE_COORDINATOR_MYSQL_CONN_MAX_AGE }}' + edx_django_service_extra_apps: '{{ COMMERCE_COORDINATOR_EXTRA_APPS }}' + 
 edx_django_service_session_expire_at_browser_close: '{{ COMMERCE_COORDINATOR_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ COMMERCE_COORDINATOR_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ COMMERCE_COORDINATOR_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ COMMERCE_COORDINATOR_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ COMMERCE_COORDINATOR_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ COMMERCE_COORDINATOR_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ COMMERCE_COORDINATOR_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ commerce_coordinator_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ COMMERCE_COORDINATOR_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_api_root: '{{ COMMERCE_COORDINATOR_API_ROOT }}' + edx_django_service_decrypt_config_enabled: '{{ COMMERCE_COORDINATOR_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ COMMERCE_COORDINATOR_COPY_CONFIG_ENABLED }}' + edx_django_service_migration_check_services: '{{ commerce_coordinator_service_name }},{{ commerce_coordinator_service_name }}-workers' + edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ commerce_coordinator_workers }}' + edx_django_service_repos: '{{ COMMERCE_COORDINATOR_REPOS }}' + edx_django_service_hostname: '~^((stage|prod)-)?{{ commerce_coordinator_hostname }}.*' diff --git a/playbooks/roles/commerce_coordinator/tasks/main.yml b/playbooks/roles/commerce_coordinator/tasks/main.yml new file mode 100644 index 00000000000..e146e1d576a --- /dev/null +++ b/playbooks/roles/commerce_coordinator/tasks/main.yml @@ -0,0 +1,23 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role commerce_coordinator +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# + diff --git a/playbooks/roles/common/defaults/main.yml b/playbooks/roles/common/defaults/main.yml index 467b4973922..bd24ea666c9 100644 --- a/playbooks/roles/common/defaults/main.yml +++ b/playbooks/roles/common/defaults/main.yml @@ -1,70 +1,7 @@ +--- -# Override these variables -# to change the base directory -# where edX is installed - -COMMON_BASE_DIR: /edx -COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var" -COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app" -COMMON_LOG_DIR: "{{ COMMON_DATA_DIR }}/log" - -# these directories contain -# symlinks for convenience -COMMON_BIN_DIR: "{{ COMMON_BASE_DIR }}/bin" -COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc" - -COMMON_ENVIRONMENT: 'default_env' -COMMON_DEPLOYMENT: 'default_deployment' -COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple' -# do not include http/https -COMMON_GIT_MIRROR: 'github.com' -# override this var to set a different hostname -COMMON_HOSTNAME: !!null - -# Set to true to customize DNS search domains -COMMON_CUSTOM_DHCLIENT_CONFIG: false -# uncomment and specifity your domains. -# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal","example.com"] - - -COMMON_MOTD_TEMPLATE: "motd.tail.j2" - -common_debian_pkgs: - - ntp - - ack-grep - - lynx-cur - - logrotate - - mosh - - rsyslog - - screen - - tree - - git - - unzip - - python2.7 - - python-pip - - python2.7-dev - # Not installed by default on vagrant ubuntu - # boxes - - curl - -common_pip_pkgs: - - virtualenv==1.10.1 - - virtualenvwrapper - -common_web_user: www-data -common_web_group: www-data -common_log_user: syslog - -common_git_ppa: "ppa:git-core/ppa" - -# Skip supervisor tasks -# Useful when supervisor is not installed (local dev) -devstack: False - -common_debian_variants: - - Ubuntu - - Debian - -common_redhat_variants: - - CentOS - - Red Hat Enterprise Linux +# Common variables are defined in the common_vars role on which this +# role depends. This is to allow sharing vars without creating +# side-effects. Any vars required by this role should be added to +# common_vars/defaults/main.yml +# diff --git a/playbooks/roles/common/handlers/main.yml b/playbooks/roles/common/handlers/main.yml deleted file mode 100644 index 17c4bf2d346..00000000000 --- a/playbooks/roles/common/handlers/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: restart rsyslogd - service: name=rsyslog state=restarted - sudo: True -- name: restart ssh - service: name=ssh state=restarted - sudo: True diff --git a/playbooks/roles/common/meta/main.yml b/playbooks/roles/common/meta/main.yml index 2acb1b54780..5d6d490e437 100644 --- a/playbooks/roles/common/meta/main.yml +++ b/playbooks/roles/common/meta/main.yml @@ -1,3 +1,8 @@ --- dependencies: - - gh_users + - common_vars + - server_utils + - role: user + user_info: "{{ COMMON_USER_INFO }}" + - role: security + when: COMMON_SECURITY_UPDATES diff --git a/playbooks/roles/common/tasks/main.yml b/playbooks/roles/common/tasks/main.yml index 58b0d65c350..8b7af2d7590 100644 --- a/playbooks/roles/common/tasks/main.yml +++ b/playbooks/roles/common/tasks/main.yml @@ -1,104 +1,265 @@ --- -- name: Add user www-data - # This is the default user for nginx - user: > - name="{{ common_web_user }}" - shell=/bin/false +- name: Check Configuration Sources + fail: + msg: "Configuration Sources Checking (COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING) is enabled, you must define {{ item }}" + when: COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING and ({{ item }} is not defined or {{ item }} != True) + with_items: "{{ COMMON_EXTRA_CONFIGURATION_SOURCES }}" + tags: + - "install" + - "install:configuration" -- name: Create common directories - file: > - path={{ item }} state=directory owner=root - group=root mode=0755 +# ubuntu +- stat: + path: "{{ item }}" with_items: - - "{{ COMMON_DATA_DIR }}" - - "{{ COMMON_APP_DIR }}" - - "{{ COMMON_BIN_DIR }}" - - "{{ COMMON_CFG_DIR }}" + - "/usr/local/share/ca-certificates" + - "/usr/sbin/update-ca-certificates" + register: update_ca_certificates +
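+# update_ca_certificates.results follows the with_items order above: +# results[0] is the ca-certificates directory, results[1] the update tool.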
+ +- name: Download digicert intermediate Certificate + get_url: + url: "{{ common_digicert_base_url }}/{{ common_digicert_name }}.pem" + dest: "/usr/local/share/ca-certificates/{{ common_digicert_name }}" + validate_certs: yes + when: update_ca_certificates is defined and update_ca_certificates.results[0].stat.exists == True + +- name: Update CA Certificates + shell: /usr/sbin/update-ca-certificates + when: update_ca_certificates is defined and update_ca_certificates.results[1].stat.exists == True -# Need to install python-pycurl to use Ansible's apt_repository module -- name: Install python-pycurl - apt: pkg=python-pycurl state=present 
update_cache=yes +# ec2-linux +- stat: + path: /usr/bin/update-ca-trust + register: update_ca_trust + +- name: Update CA Trust + shell: /usr/bin/update-ca-trust + when: update_ca_trust is defined and update_ca_trust.stat.exists == True + + +- name: Add common_users + # This is the default user for nginx + user: + name: "{{ item }}" + shell: /bin/false + with_items: + - "{{ common_web_user }}" + - "{{ common_log_user }}" + +# Determine if machine is provisioned via vagrant +# Some EC2-specific steps would need to be skipped +- name: check if instance is vagrant + stat: + path: /home/vagrant + register: vagrant_home_dir # Ensure that we get a current version of Git # GitHub requires version 1.7.10 or later # https://help.github.com/articles/https-cloning-errors - name: Add git apt repository - apt_repository: repo="{{ common_git_ppa }}" + apt_repository: + repo: "{{ common_git_ppa }}" + update_cache: yes + register: add_repo + until: add_repo is success + retries: 10 + delay: 5 + when: ansible_distribution in common_debian_variants + +# Ensure that we can install old software if need be. +- name: Add edX PPA apt key + apt_key: + id: "{{ COMMON_EDX_PPA_KEY_ID }}" + keyserver: "{{ COMMON_EDX_PPA_KEY_SERVER }}" + state: "present" + when: > + ansible_distribution in common_debian_variants and + (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty' or ansible_distribution_release == 'xenial') + +- name: Update expired apt keys + shell: apt-key adv --recv-keys --keyserver {{ COMMON_EDX_PPA_KEY_SERVER }} {{ COMMON_EDX_PPA_KEY_ID }} + when: > + ansible_distribution in common_debian_variants and + (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty' or ansible_distribution_release == 'xenial') + +- name: Add custom edX PPA + # Ensure that we get the latest version of python 2.7 + # MySQL 5.6 is from our own PPA: https://bugs.mysql.com/bug.php?id=84848 + apt_repository: + repo: "{{ COMMON_EDX_PPA }}" + when: > + ansible_distribution in common_debian_variants and + (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty' or ansible_distribution_release == 'xenial') + +# The deadsnakes PPA is required to install python3.5 on Bionic and Focal +# Xenial comes with python3.5 installed. +- name: add deadsnakes repository + apt_repository: + repo: "ppa:deadsnakes/ppa" + update_cache: yes + register: add_repo + until: add_repo is success + retries: 10 + delay: 5 + when: ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' or ansible_distribution_release == 'jammy' + tags: + - install + - install:system-requirements - name: Install role-independent useful system packages # do this before log dir setup; rsyslog package guarantees syslog user present - apt: > - pkg={{','.join(common_debian_pkgs)}} install_recommends=yes - state=present update_cache=yes + apt: + name: "{{ common_debian_pkgs }}" + install_recommends: yes + state: present + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: ansible_distribution in common_debian_variants -- name: Create common log directory - file: > - path={{ COMMON_LOG_DIR }} state=directory owner=syslog - group=syslog mode=0755 +- name: Install role-independent packages useful for devstack. 
+ apt: + name: "{{ common_debian_devstack_pkgs }}" + install_recommends: yes + state: present + update_cache: yes + when: > + ansible_distribution in common_debian_variants and + ({{ devstack | default(False) }} or {{ edx_django_service_is_devstack | default(False) }}) + tags: + - "devstack" -- name: upload sudo config for key forwarding as root - copy: > - src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward - validate='visudo -c -f %s' owner=root group=root mode=0440 -- name: pip install virtualenv - pip: > - name="{{ item }}" state=present - extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" - with_items: common_pip_pkgs +- name: Install role-independent useful system packages from custom PPA + apt: + name: "{{ old_python_debian_pkgs }}" + install_recommends: yes + state: present + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: > + ansible_distribution in common_debian_variants and + ansible_distribution_release in old_python_ppa_releases and + (ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal') + +- name: Install role-independent useful system packages + yum: + name: "{{ common_redhat_pkgs }}" + state: present + update_cache: yes + when: ansible_distribution in common_redhat_variants -- name: Install rsyslog configuration for edX - template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644 - notify: restart rsyslogd +- name: Create common directories + file: + path: "{{ item.path }}" + state: "{{ item.state | default('directory') }}" + owner: "{{ item.owner | default('root') }}" + group: "{{ item.group | default('root') }}" + mode: "0755" + with_items: "{{ common_directories }}" + tags: + - install + - install:configuration + - common_directories -- name: Install logrotate configuration for edX - template: dest=/etc/logrotate.d/edx-services src=etc/logrotate.d/edx_logrotate.j2 owner=root group=root mode=644 +- name: upload sudo config for key forwarding as root + copy: + src: ssh_key_forward + dest: /etc/sudoers.d/ssh_key_forward + validate: 'visudo -c -f %s' + owner: root + group: root + mode: "0440" + +- name: pip install virtualenv + pip: + name: "{{ common_pip_pkgs }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + when: ansible_distribution in common_debian_variants - # This is in common to keep all logrotation config - # in the same role -- name: Install logrotate configuration for tracking file - template: dest=/etc/logrotate.d/tracking.log src=etc/logrotate.d/edx_logrotate_tracking_log.j2 owner=root group=root mode=644 - name: update /etc/hosts - template: src=hosts.j2 dest=/etc/hosts - when: COMMON_HOSTNAME + template: + src: hosts.j2 + dest: /etc/hosts + when: COMMON_HOSTNAME|length > 0 register: etc_hosts - name: update /etc/hostname - template: src=hostname.j2 dest=/etc/hostname - when: COMMON_HOSTNAME + template: + src: hostname.j2 + dest: /etc/hostname + when: COMMON_HOSTNAME|length > 0 register: etc_hostname - name: run hostname - shell: > - hostname -F /etc/hostname - when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed) - -- name: update /etc/dhcp/dhclient.conf - template: src=etc/dhcp/dhclient.conf.j2 dest=/etc/dhcp/dhclient.conf - when: COMMON_CUSTOM_DHCLIENT_CONFIG - - # Remove some of the default motd display on ubuntu - # and add a custom motd. 
These do not require an - # ssh restart -- name: update the ssh motd on Ubuntu - file: > - mode=0644 - path={{ item }} + shell: hostname -F /etc/hostname + when: COMMON_HOSTNAME|length >0 and (etc_hosts.changed or etc_hostname.changed) + +- name: Copy the templates to their respective destination + template: + dest: "{{ item.dest }}" + src: "{{ item.src }}" + owner: root + group: root + mode: "{{ item.mode | default('0644') }}" + register: config_templates with_items: - - "/etc/update-motd.d/10-help-text" - - "/usr/share/landscape/50-landscape-sysinfo" - - "/etc/update-motd.d/51-cloudguest" - - "/etc/update-motd.d/91-release-upgrade" - -- name: add ssh-warning banner motd - template: > - dest=/etc/motd.tail - src={{ COMMON_MOTD_TEMPLATE }} mode=0755 owner=root group=root - -- name: update ssh config - template: > - dest=/etc/ssh/sshd_config - src=sshd_config.j2 mode=0644 owner=root group=root - notify: restart ssh + - { src: 'edx_rsyslog.j2', dest: '/etc/rsyslog.d/99-edx.conf' } + - { src: 'etc/logrotate.d/hourly/edx_logrotate.j2', dest: '/etc/logrotate.d/hourly/edx-services' } + - { src: 'etc/cron.hourly/logrotate.j2', dest: '/etc/cron.hourly/logrotate', mode: '0555' } + - { src: 'etc/logrotate.d/hourly/edx_logrotate_tracking_log.j2', dest: '/etc/logrotate.d/hourly/tracking.log' } + +- name: Are we in a Docker container + shell: echo $(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + + # TODO: restarts no matter which template has changed, need to examine + # the results +- name: restart rsyslogd + service: + name: rsyslog + state: restarted + become: True + when: config_templates.changed and docker_container.stdout != 'yes' + +- name: Add ntp alert script + template: + src: "log-ntp-alerts.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-ntp-alerts.sh" + owner: root + group: root + mode: "0755" + +# this script is executed by PAM on interactive login to calculate the +# number of package updates available for the MOTD. 
This slows down +# first login on machines by a significant amount +- name: Remove MOTD update checker + file: + dest: "/etc/update-motd.d/90-updates-available" + state: absent + tags: + - install + - install:configuration + +- name: Set up a cron job to run the log-ntp-alerts script + cron: + name: "log-ntp-alerts" + job: "{{ COMMON_BIN_DIR }}/log-ntp-alerts.sh >/dev/null 2>&1" + +- name: install logrotate configuration + template: + src: etc/logrotate.d/ntp.j2 + dest: /etc/logrotate.d/ntp + tags: + - "install" + - "install:configuration" + - "logrotate" diff --git a/playbooks/roles/common/templates/devstack_motd.tail.j2 b/playbooks/roles/common/templates/devstack_motd.tail.j2 deleted file mode 100644 index 226ed6c7178..00000000000 --- a/playbooks/roles/common/templates/devstack_motd.tail.j2 +++ /dev/null @@ -1,9 +0,0 @@ -******************************************************************* -* * -* _ _| |\ \/ / * -* / -_) _` | > < * -* \___\__,_|/_/\_\ * -* * -* Instructions and troubleshooting: * -* https://github.com/edx/configuration/wiki/edX-Developer-Stack * -******************************************************************* diff --git a/playbooks/roles/common/templates/edx_logrotate.j2 b/playbooks/roles/common/templates/edx_logrotate.j2 deleted file mode 100644 index 378ede27ea2..00000000000 --- a/playbooks/roles/common/templates/edx_logrotate.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{{ COMMON_LOG_DIR }}/*/edx.log { - create - compress - copytruncate - delaycompress - dateext - missingok - notifempty - daily - rotate 90 - size 1M -} diff --git a/playbooks/roles/common/templates/edx_logrotate_tracking_log.j2 b/playbooks/roles/common/templates/edx_logrotate_tracking_log.j2 deleted file mode 100644 index c3c05061a29..00000000000 --- a/playbooks/roles/common/templates/edx_logrotate_tracking_log.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{{ COMMON_LOG_DIR }}/tracking.log { - create - compress - delaycompress - dateext - missingok - notifempty - daily - rotate 365000 - size 1M -} diff --git a/playbooks/roles/common/templates/edx_rsyslog.j2 b/playbooks/roles/common/templates/edx_rsyslog.j2 index 3b4d0401459..81fa4f46ece 100644 --- a/playbooks/roles/common/templates/edx_rsyslog.j2 +++ b/playbooks/roles/common/templates/edx_rsyslog.j2 @@ -26,13 +26,13 @@ auth,authpriv.* /var/log/auth.log # Maybe one day this will be answered - http://stackoverflow.com/questions/10449447/how-to-avoid-syslogtag-from-rsyslog-template $template tracking,"%syslogtag%%msg%\n" -# looks for [service_name=] in the beginning of the log message, +# looks for [service_variant=] in the beginning of the log message, # if it exists the log will go into {{ COMMON_LOG_DIR }}//edx.log, otherwise # it will go into {{ COMMON_LOG_DIR }}/edx.log $template DynaFile,"{{ COMMON_LOG_DIR }}/%syslogtag:R,ERE,1,BLANK:\[service_variant=([a-zA-Z_-]*)\].*--end%/edx.log" local0.* -?DynaFile -local1.* {{ COMMON_LOG_DIR }}/tracking.log;tracking +local1.* {{ COMMON_LOG_DIR }}/tracking/tracking.log;tracking #cron.* /var/log/cron.log #daemon.* -/var/log/daemon.log kern.* -/var/log/kern.log @@ -77,17 +77,3 @@ news.notice -/var/log/news/news.notice # news.=crit;news.=err;news.=notice;\ # *.=debug;*.=info;\ # *.=notice;*.=warn /dev/tty8 - -# The named pipe /dev/xconsole is for the `xconsole' utility. To use it, -# you must invoke `xconsole' with the `-file' option: -# -# $ xconsole -file /dev/xconsole [...] -# -# NOTE: adjust the list below, or you'll go crazy if you have a reasonably -# busy site.. 
-# -daemon.*;mail.*;\ - news.err;\ - *.=debug;*.=info;\ - *.=notice;*.=warn |/dev/xconsole - diff --git a/playbooks/roles/common/templates/etc/cron.hourly/logrotate.j2 b/playbooks/roles/common/templates/etc/cron.hourly/logrotate.j2 new file mode 100644 index 00000000000..11320c3fa0a --- /dev/null +++ b/playbooks/roles/common/templates/etc/cron.hourly/logrotate.j2 @@ -0,0 +1,4 @@ +#!/bin/sh + +test -x /usr/sbin/logrotate || exit 0 +/usr/sbin/logrotate /etc/logrotate.d/hourly diff --git a/playbooks/roles/common/templates/etc/logrotate.d/edx_logrotate.j2 b/playbooks/roles/common/templates/etc/logrotate.d/edx_logrotate.j2 deleted file mode 100644 index 378ede27ea2..00000000000 --- a/playbooks/roles/common/templates/etc/logrotate.d/edx_logrotate.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{{ COMMON_LOG_DIR }}/*/edx.log { - create - compress - copytruncate - delaycompress - dateext - missingok - notifempty - daily - rotate 90 - size 1M -} diff --git a/playbooks/roles/common/templates/etc/logrotate.d/edx_logrotate_tracking_log.j2 b/playbooks/roles/common/templates/etc/logrotate.d/edx_logrotate_tracking_log.j2 deleted file mode 100644 index c3c05061a29..00000000000 --- a/playbooks/roles/common/templates/etc/logrotate.d/edx_logrotate_tracking_log.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{{ COMMON_LOG_DIR }}/tracking.log { - create - compress - delaycompress - dateext - missingok - notifempty - daily - rotate 365000 - size 1M -} diff --git a/playbooks/roles/common/templates/etc/logrotate.d/hourly/edx_logrotate.j2 b/playbooks/roles/common/templates/etc/logrotate.d/hourly/edx_logrotate.j2 new file mode 100644 index 00000000000..aa1bf1ce8c8 --- /dev/null +++ b/playbooks/roles/common/templates/etc/logrotate.d/hourly/edx_logrotate.j2 @@ -0,0 +1,22 @@ +{{ COMMON_LOG_DIR }}/*/edx.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -HUP rsyslogd + endscript + {% if COMMON_OBJECT_STORE_EDX_LOG_SYNC -%} + lastaction + {{ COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT }} -d "{{ COMMON_LOG_DIR }}/lms" -b "{{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }}" -p "{{ COMMON_OBJECT_STORE_EDX_LOG_SYNC_PREFIX }}lms/" + {{ COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT }} -d "{{ COMMON_LOG_DIR }}/cms" -b "{{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }}" -p "{{ COMMON_OBJECT_STORE_EDX_LOG_SYNC_PREFIX }}cms/" + endscript + {% endif -%} +} diff --git a/playbooks/roles/common/templates/etc/logrotate.d/hourly/edx_logrotate_tracking_log.j2 b/playbooks/roles/common/templates/etc/logrotate.d/hourly/edx_logrotate_tracking_log.j2 new file mode 100644 index 00000000000..102355eafb3 --- /dev/null +++ b/playbooks/roles/common/templates/etc/logrotate.d/hourly/edx_logrotate_tracking_log.j2 @@ -0,0 +1,21 @@ +{{ COMMON_LOG_DIR }}/tracking/tracking.log { + {% for config in COMMON_TRACKING_LOG_ROTATION %} + {{ config }} + {% endfor %} + compress + create + dateext + dateformat -%Y%m%d-%s + missingok + nodelaycompress + notifempty + rotate 16000 + postrotate + /usr/bin/killall -HUP rsyslogd + endscript + lastaction + {% if COMMON_OBJECT_STORE_LOG_SYNC -%} + {{ COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT }} -d "{{ COMMON_LOG_DIR }}/tracking" -b "{{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }}" -p "{{ COMMON_OBJECT_STORE_LOG_SYNC_PREFIX }}" + {% endif -%} + endscript +} diff --git a/playbooks/roles/common/templates/etc/logrotate.d/ntp.j2 b/playbooks/roles/common/templates/etc/logrotate.d/ntp.j2 new file mode 100644 index 00000000000..b37d11fe93e --- /dev/null +++ 
b/playbooks/roles/common/templates/etc/logrotate.d/ntp.j2 @@ -0,0 +1,8 @@ +{{ COMMON_LOG_DIR }}/ntp.log { + compress + dateext + dateformat -%Y%m%d-%s + missingok + daily + rotate 3 +} diff --git a/playbooks/roles/common/templates/log-ntp-alerts.sh.j2 b/playbooks/roles/common/templates/log-ntp-alerts.sh.j2 new file mode 100644 index 00000000000..a1a7bae652b --- /dev/null +++ b/playbooks/roles/common/templates/log-ntp-alerts.sh.j2 @@ -0,0 +1,16 @@ +#!/bin/bash + +log_directory={{ COMMON_LOG_DIR }} +# Column 5 of `ntpq -c associations` is the per-peer reach flag (yes/no); +# alert if any peer is not reachable. +reach=$(ntpq -c associations | awk '{print $5}') +if [[ ${reach} == *"no"* ]]; then + echo $(date -u) $(hostname) "NTPD not synchronized - Please investigate" >> ${log_directory}/ntp.log +fi + +limit=100 # limit in milliseconds +# Peer offsets are read from fixed columns; the sign is stripped so only the +# magnitude is compared against the limit. +offsets=$(ntpq -nc peers | tail -n +3 | cut -c 62-66 | tr -d '-') +for offset in ${offsets}; do + if [ ${offset:-0} -ge ${limit:-100} ]; then + echo $(date -u) $(hostname) "An NTPD offset with value $offset is excessive - Please investigate" >> ${log_directory}/ntp.log + exit 1 + fi +done diff --git a/playbooks/roles/common/templates/motd.tail.j2 b/playbooks/roles/common/templates/motd.tail.j2 deleted file mode 100644 index 1a3d5e47a00..00000000000 --- a/playbooks/roles/common/templates/motd.tail.j2 +++ /dev/null @@ -1,12 +0,0 @@ -******************************************************************* -* _ __ __ * -* _ _| |\ \/ / This system is for the use of authorized * -* / -_) _` | > < users only. Usage of this system may be * -* \___\__,_|/_/\_\ monitored and recorded by system personnel. * -* * -* Anyone using this system expressly consents to such monitoring * -* and is advised that if such monitoring reveals possible * -* evidence of criminal activity, system personnel may provide the * -* evidence from such monitoring to law enforcement officials. 
* -* * -******************************************************************* diff --git a/playbooks/roles/common/templates/sshd_config.j2 b/playbooks/roles/common/templates/sshd_config.j2 deleted file mode 100644 index ff3cdbc51d2..00000000000 --- a/playbooks/roles/common/templates/sshd_config.j2 +++ /dev/null @@ -1,90 +0,0 @@ -# {{ ansible_managed }} -# -# Changes from the default Ubuntu ssh config: -# - LogLevel set to VERBOSE -# - -# What ports, IPs and protocols we listen for -Port 22 -# Use these options to restrict which interfaces/protocols sshd will bind to -#ListenAddress :: -#ListenAddress 0.0.0.0 -Protocol 2 -# HostKeys for protocol version 2 -HostKey /etc/ssh/ssh_host_rsa_key -HostKey /etc/ssh/ssh_host_dsa_key -HostKey /etc/ssh/ssh_host_ecdsa_key -#Privilege Separation is turned on for security -UsePrivilegeSeparation yes - -# Lifetime and size of ephemeral version 1 server key -KeyRegenerationInterval 3600 -ServerKeyBits 768 - -# Logging -SyslogFacility AUTH -LogLevel VERBOSE - -# Authentication: -LoginGraceTime 120 -PermitRootLogin yes -StrictModes yes - -RSAAuthentication yes -PubkeyAuthentication yes -#AuthorizedKeysFile %h/.ssh/authorized_keys - -# Don't read the user's ~/.rhosts and ~/.shosts files -IgnoreRhosts yes -# For this to work you will also need host keys in /etc/ssh_known_hosts -RhostsRSAAuthentication no -# similar for protocol version 2 -HostbasedAuthentication no -# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication -#IgnoreUserKnownHosts yes - -# To enable empty passwords, change to yes (NOT RECOMMENDED) -PermitEmptyPasswords no - -# Change to yes to enable challenge-response passwords (beware issues with -# some PAM modules and threads) -ChallengeResponseAuthentication no - -# Change to no to disable tunnelled clear text passwords -PasswordAuthentication no - -# Kerberos options -#KerberosAuthentication no -#KerberosGetAFSToken no -#KerberosOrLocalPasswd yes -#KerberosTicketCleanup yes - -# GSSAPI options -#GSSAPIAuthentication no -#GSSAPICleanupCredentials yes - -X11Forwarding yes -X11DisplayOffset 10 -PrintMotd no -PrintLastLog yes -TCPKeepAlive yes -#UseLogin no - -#MaxStartups 10:30:60 -#Banner /etc/issue - -# Allow client to pass locale environment variables -AcceptEnv LANG LC_* - -Subsystem sftp /usr/lib/openssh/sftp-server - -# Set this to 'yes' to enable PAM authentication, account processing, -# and session processing. If this is enabled, PAM authentication will -# be allowed through the ChallengeResponseAuthentication and -# PasswordAuthentication. Depending on your PAM configuration, -# PAM authentication via ChallengeResponseAuthentication may bypass -# the setting of "PermitRootLogin without-password". -# If you just want the PAM account and session checks to run without -# PAM authentication, then enable this but set PasswordAuthentication -# and ChallengeResponseAuthentication to 'no'. 
-UsePAM yes diff --git a/playbooks/roles/common_vars/defaults/main.yml b/playbooks/roles/common_vars/defaults/main.yml new file mode 100644 index 00000000000..b81f99637ea --- /dev/null +++ b/playbooks/roles/common_vars/defaults/main.yml @@ -0,0 +1,309 @@ + +# Override these variables +# to change the base directory +# where edX is installed + +# Set global htpasswd credentials +COMMON_ENABLE_BASIC_AUTH: False +COMMON_HTPASSWD_USER: edx +COMMON_HTPASSWD_PASS: edx +COMMON_BASIC_AUTH_EXCEPTIONS: + - 192.168.0.0/16 + - 172.16.0.0/12 + +# Settings to use for calls to edxapp manage.py +COMMON_EDXAPP_SETTINGS: 'production' + +# Turn on syncing logs on rotation for edx +# application and tracking logs, must also +# have the aws or openstack role installed +COMMON_OBJECT_STORE_LOG_SYNC: False +COMMON_OBJECT_STORE_LOG_SYNC_BUCKET: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}" +COMMON_OBJECT_STORE_LOG_SYNC_PREFIX: "logs/tracking/" +COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT: "{{ COMMON_BIN_DIR }}/send-logs-to-object-store" +COMMON_OBJECT_STORE_LOG_SYNC_ON_EXIT: "{{ COMMON_BIN_DIR }}/sync-logs-on-exit" + +COMMON_OBJECT_STORE_EDX_LOG_SYNC_PREFIX: "logs/edx/" +COMMON_OBJECT_STORE_EDX_LOG_SYNC: false + +COMMON_BASE_DIR: /edx +COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var" +COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app" +COMMON_LOG_DIR: "{{ COMMON_DATA_DIR }}/log" +# Override this to create +# common users in all roles +COMMON_USER_INFO: [] +# these directories contain +# symlinks for convenience +COMMON_BIN_DIR: "{{ COMMON_BASE_DIR }}/bin" +COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc" + +common_directories: + - path: "{{ COMMON_DATA_DIR }}" + - path: "{{ COMMON_APP_DIR }}" + - path: "{{ COMMON_BIN_DIR }}" + - path: "{{ COMMON_CFG_DIR }}" + - path: "{{ COMMON_LOG_DIR }}" + owner: "{{ common_log_user }}" + group: "{{ common_log_user }}" + - path: "/etc/logrotate.d/hourly" + - path: "/etc/rsyslog.d/50-default.conf" + state: absent
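+# Entries may carry their own owner/group or state: the stock rsyslog +# 50-default.conf is removed (state: absent), presumably so its rules do not +# duplicate the ones templated into 99-edx.conf.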
+ +COMMON_ENVIRONMENT: 'default_env' +COMMON_DEPLOYMENT: 'default_deployment' +COMMON_PIP_VERBOSITY: '' +COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple' +COMMON_NPM_MIRROR_URL: 'https://registry.npmjs.org/' +COMMON_UBUNTU_APT_KEYSERVER: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=" + +common_digicert_name: "DigiCertSHA2SecureServerCA.crt" +common_digicert_base_url: "https://cacerts.digicert.com/" + +COMMON_EDX_PPA: "deb http://ppa.edx.org {{ ansible_distribution_release }} main" +COMMON_EDX_PPA_KEY_SERVER: "keyserver.ubuntu.com" +COMMON_EDX_PPA_KEY_ID: "69464050" + +# The git checkout url in most roles is constructed from these values +# e.g. https://{{COMMON_GIT_MIRROR}}/{{COMMON_GIT_PATH}}/repo.git +COMMON_GIT_PROTOCOL: 'https' # https|ssh +COMMON_GIT_MIRROR: 'github.com' # git server hostname +COMMON_GIT_PATH: 'edx' # git path prefix + +# override this var to set a different hostname +COMMON_HOSTNAME: "" +COMMON_DEPLOY_HOSTNAME: "" + +# Set to true to customize DNS search domains +COMMON_CUSTOM_DHCLIENT_CONFIG: false +# uncomment and specify your domains. +# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal","example.com"] + +COMMON_SSH_PASSWORD_AUTH: "no" + +COMMON_SECURITY_UPDATES: no +# These are three maintenance accounts across all databases +# the read only user is granted select privs on all dbs +# the admin user is granted create user privs on all dbs +# the migrate user is granted table alter privs on all dbs + +COMMON_MYSQL_READ_ONLY_USER: 'read_only' +COMMON_MYSQL_READ_ONLY_PASS: 'password' +COMMON_ANALYTICS_MYSQL_READ_ONLY_USER: 'read_only' +COMMON_ANALYTICS_MYSQL_READ_ONLY_PASS: 'password' +COMMON_MYSQL_ADMIN_USER: 'admin' +COMMON_MYSQL_ADMIN_PASS: 'password' +COMMON_MYSQL_MIGRATE_USER: 'migrate' +COMMON_MYSQL_MIGRATE_PASS: 'password' + +COMMON_MONGO_READ_ONLY_USER: 'read_only' +COMMON_MONGO_READ_ONLY_PASS: !!null +# Enable installation of the Datadog agent (infrastructure monitoring) +COMMON_ENABLE_DATADOG: False +# Enable APM monitoring with Datadog (metrics, traces, and logs) +COMMON_ENABLE_DATADOG_APP: False +COMMON_ENABLE_NGINXTRA: False +COMMON_ENABLE_SPLUNKFORWARDER: False +COMMON_ENABLE_NEWRELIC: False +COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: False +# enables app reporting, you must enable newrelic +# as well +COMMON_ENABLE_NEWRELIC_APP: False +COMMON_ENABLE_MINOS: False +COMMON_ENABLE_INSIGHTVM_AGENT: False +COMMON_TAG_EC2_INSTANCE: False +COMMON_BOTO_VERSION: '2.48.0' +COMMON_NODE_VERSION: '12.13.0' +common_redhat_pkgs: + - ntp + - lynx + - logrotate + - rsyslog + - git + - unzip + - acl + +common_debian_pkgs_default: + - apt-transport-https + - ntp + - acl + - iotop + - lynx + - logrotate + - rsyslog + - git + - unzip + - net-tools + - python3-pip + +common_release_specific_debian_pkgs: + xenial: + - python-pip + - python2.7-dev + bionic: + - python-pip + - python3.5-dev + focal: + - python3.5-dev + jammy: + - python3.8 + +# The installed set is the shared defaults plus the entries for the release +# detected at runtime (e.g. on jammy this appends python3.8). +common_debian_pkgs: "{{ common_debian_pkgs_default + common_release_specific_debian_pkgs[ansible_distribution_release] }}" + +common_debian_devstack_pkgs: + - watchman + +# Packages that should be installed from our custom PPA, i.e. COMMON_EDX_PPA +old_python_debian_pkgs: + - "python2.7=2.7.10-0+{{ ansible_distribution_release }}1" + + +COMMON_PIP_VERSION: '21.2.1' + +common_pip_pkgs: + - pip=={{ COMMON_PIP_VERSION }} + - configparser==4.0.2 + - setuptools==44.1.0 + - virtualenv==20.2.0 + - zipp==1.2.0 + - boto3 + - importlib-resources==3.2.1 + +common_web_user: www-data +common_web_group: www-data +common_log_user: syslog + +common_git_ppa: "ppa:git-core/ppa" + +# Skip supervisor tasks +# When set to true this flag will allow you to install everything but keep +# supervisor from starting any of the services. +# Service files will be placed in supervisor's conf.available.d but not linked +# to supervisors 'conf.d' directory. +disable_edx_services: False + +# Some apps run differently in dev mode (forums) +# so different start scripts are generated in dev mode. +devstack: False + +# Some cluster apps need special settings when in vagrant +# due to eth0 always being the same IP address +vagrant_cluster: False + +common_debian_variants: + - Ubuntu + - Debian + +# We only have to install old Python for these releases: +old_python_ppa_releases: + - trusty + +common_redhat_variants: + - CentOS + - Red Hat Enterprise Linux + - Amazon + +# Gunicorn pre_request function to log each request before it is processed further. 
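+# (This snippet is typically templated into a service's gunicorn config +# module; gunicorn then calls pre_request(worker, req) before handing each +# request to the app.)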
+common_pre_request: | + def pre_request(worker, req): + worker.log.info("%s %s" % (req.method, req.path)) + +# Code used in gunicorn post_fork functions to be sure we aren't sharing cache +# connections among forked children. +common_close_all_caches: | + def close_all_caches(): + # Close the cache so that newly forked workers cannot accidentally share + # the socket with the processes they were forked from. This prevents a race + # condition in which one worker could get a cache response intended for + # another worker. + # We do this in a way that is safe for 1.4 and 1.8 while we still have some + # 1.4 installations. + from django.conf import settings + from django.core import cache as django_cache + if hasattr(django_cache, 'caches'): + get_cache = django_cache.caches.__getitem__ + else: + get_cache = django_cache.get_cache + for cache_name in settings.CACHES: + cache = get_cache(cache_name) + if hasattr(cache, 'close'): + cache.close() + + # The 1.4 global default cache object needs to be closed also: 1.4 + # doesn't ensure you get the same object when requesting the same + # cache. The global default is a separate Python object from the cache + # you get with get_cache("default"), so it will have its own connection + # that needs to be closed. + cache = django_cache.cache + if hasattr(cache, 'close'): + cache.close() + +# Each entry below is rendered verbatim into the hourly tracking-log +# logrotate stanza. +COMMON_TRACKING_LOG_ROTATION: + - size 1M + +# If you include configuration from multiple sources and want to ensure that +# all sources are included when running playbooks, this provides a way to check that. +# As an example, if you have a secure configuration repo, you can do +# COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING: true +# COMMON_EXTRA_CONFIGURATION_SOURCES: +# - COMMON_USING_SECURE_REPO +# The common role will then ensure that COMMON_USING_SECURE_REPO is defined and true. +# This will ensure that you've included a file that sets +# COMMON_USING_SECURE_REPO: true +COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING: false +COMMON_EXTRA_CONFIGURATION_SOURCES: [] + +COMMON_LMS_BASE_URL: 'http://127.0.0.1:8000' +COMMON_OAUTH_BASE_URL: '{{ COMMON_LMS_BASE_URL }}' +COMMON_OAUTH_PUBLIC_URL_ROOT: '{{ COMMON_OAUTH_BASE_URL }}/oauth2' +COMMON_OAUTH_URL_ROOT: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' +COMMON_OAUTH_LOGOUT_URL: '{{ COMMON_OAUTH_BASE_URL }}/logout' + +COMMON_LOGO_URL: "https://edx-cdn.org/v3/default/logo.svg" +COMMON_LOGO_TRADEMARK_URL: "https://edx-cdn.org/v3/default/logo-trademark.svg" +COMMON_LOGO_WHITE_URL: "https://edx-cdn.org/v3/default/logo-white.svg" +COMMON_FAVICON_URL: "https://edx-cdn.org/v3/default/favicon.ico" + +############ +# Settings related to JSON Web Tokens (JWTs). +# See https://github.com/openedx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0003-use-jwt-as-oauth-tokens-remove-openid-connect.rst +COMMON_JWT_AUDIENCE: 'SET-ME-PLEASE' +COMMON_JWT_ISSUER: '{{ COMMON_OAUTH_URL_ROOT }}' + +# The following should be the string representation of a JSON Web Key Set (JWK set) +# containing active public keys for signing JWTs. +# See https://github.com/openedx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0008-use-asymmetric-jwts.rst +COMMON_JWT_PUBLIC_SIGNING_JWK_SET: '' + +COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD: 'edx-jwt-cookie-header-payload' +COMMON_JWT_AUTH_COOKIE_SIGNATURE: 'edx-jwt-cookie-signature' + +# To be deprecated, in favor of the above COMMON_JWT_PUBLIC_SIGNING_JWK_SET. 
+COMMON_JWT_SECRET_KEY: 'SET-ME-PLEASE' +############ + +# Set worker user default +CREATE_SERVICE_WORKER_USERS: True + +COMMON_ENABLE_AWS_ROLE: true + +# Remote config +COMMON_HERMES_ENABLED: false + +COMMON_DECRYPT_CONFIG_ENABLED: false +COMMON_COPY_CONFIG_ENABLED: false + +# Disable logging of config rendering which has secrets +COMMON_CONFIG_NO_LOGGING: True + +# Default sandbox build flag to false +SANDBOX_CONFIG: False + +# Should we create the JWT settings? +CONFIGURE_JWTS: false + +# Variable to control setting up the retirement services +COMMON_RETIREMENT_SERVICE_SETUP: false + +# How to log in as "lms" to xqueue. +COMMON_XQUEUE_LMS_PASSWORD: password diff --git a/playbooks/roles/common_vars/tasks/main.yml b/playbooks/roles/common_vars/tasks/main.yml new file mode 100644 index 00000000000..086c78ee11e --- /dev/null +++ b/playbooks/roles/common_vars/tasks/main.yml @@ -0,0 +1,4 @@ +--- + +# There should never be any side-effecting tasks included in this role. +# It is used solely for making shared variables available across roles. \ No newline at end of file diff --git a/playbooks/roles/conductor/defaults/main.yml b/playbooks/roles/conductor/defaults/main.yml new file mode 100644 index 00000000000..3e2f1f595e8 --- /dev/null +++ b/playbooks/roles/conductor/defaults/main.yml @@ -0,0 +1,21 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role conductor +# + +# .env vars + +# nginx vars +NGINX_CONDUCTOR_PROXY_INTERCEPT_ERRORS: true +CONDUCTOR_STATIC_SITES: [] +CONDUCTOR_REDIRECT_ROOT: false +CONDUCTOR_ROOT_REDIRECT_PATH: "" + diff --git a/playbooks/roles/conductor/meta/main.yml b/playbooks/roles/conductor/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/conductor/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/conductor/tasks/main.yml b/playbooks/roles/conductor/tasks/main.yml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/playbooks/roles/config-encoders/.gitignore b/playbooks/roles/config-encoders/.gitignore new file mode 100644 index 00000000000..0d20b6487c6 --- /dev/null +++ b/playbooks/roles/config-encoders/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/playbooks/roles/config-encoders/LICENSE b/playbooks/roles/config-encoders/LICENSE new file mode 100644 index 00000000000..10926e87f11 --- /dev/null +++ b/playbooks/roles/config-encoders/LICENSE @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
+
diff --git a/playbooks/roles/config-encoders/README.rst b/playbooks/roles/config-encoders/README.rst
new file mode 100644
index 00000000000..a68c0099f4f
--- /dev/null
+++ b/playbooks/roles/config-encoders/README.rst
@@ -0,0 +1,1289 @@
+Originally authored by Jiri Tyr (https://github.com/jtyr/ansible-config_encoder_filters)
+
+Config Encoder Filters
+======================
+
+This is an Ansible role used to deliver the Config Encoder Filters as
+a dependency of another Ansible role.
+
+
+Table of Contents
+-----------------
+
+- Motivation_
+- Example_
+- Usage_
+- Installation_
+- `Supported encoders`_
+
+  - encode_apache_
+  - encode_erlang_
+  - encode_haproxy_
+  - encode_ini_
+  - encode_json_
+  - encode_logstash_
+  - encode_nginx_
+  - encode_pam_
+  - encode_toml_
+  - encode_xml_
+  - encode_yaml_
+
+- Utilities_
+
+  - template_replace_
+
+- License_
+- Author_
+
+----
+
+
+.. _Motivation:
+
+Motivation
+----------
+
+Ansible Galaxy contains a lot of useful roles. Some of them exist in
+many variations which differ only by their parameterization. The
+parameterization is often used mainly in the templates which generate
+the configuration file. A good example of this issue is Nginx, for
+which you can find almost 200 roles on Ansible Galaxy.
+
+Nginx can be configured in an infinite number of ways, and it is
+therefore almost impossible to create an Ansible template file which
+would capture all possible variations of the configuration. Even if a
+suitable role is found, users often want to customize it even further.
+This is where people normally clone the role and add the parameters
+they are missing. Some people try to get the change back into the
+original role by creating a pull request (PR), but sometimes such a
+change is not accepted by the maintainer of the original role and the
+user ends up maintaining his or her own clone forever.
+
+This is why the Config Encoder filters were developed: to facilitate
+the creation of Ansible roles with universal configuration. The
+structure of the configuration file is described as a YAML data
+structure stored in a variable. The variable, together with the Config
+Encoder filter, is then used in the template file which generates the
+final configuration file. This approach shifts the paradigm from
+thinking about configuration files as templates to thinking about them
+as data structures. The data structure can be generated dynamically,
+which makes it possible to create truly universal configuration.
+
+
+.. _Example:
+
+Example
+-------
+
+Imagine the following INI file::
+
+  [section1]
+  option11=value11
+  option12=value12
+
+Such a configuration file can be described as a YAML data structure::
+
+  myapp_config:
+    section1:
+      option11: value11
+      option12: value12
+
+The variable is then used together with the ``encode_ini`` Config
+Encoder filter in the template file ``myapp.cfg.j2`` like this::
+
+  {{ myapp_config | encode_ini }}
+
+And finally, the template file is used in a task like this::
+
+  - name: Create config file
+    template:
+      src: myapp.cfg.j2
+      dest: /etc/myapp/myapp.cfg
+
+When the task is executed, it creates exactly the same file as the
+original INI file.
+
+So we can describe the configuration as a data structure which is then
+converted into the final configuration file format with the Config
+Encoder filter.
+
+In order to change the above configuration, we would have to overwrite
+the whole ``myapp_config`` variable, which is not very practical.
+Therefore we break the monolithic variable into a set of variables
+which allow us to change any part of the configuration without the need
+to overwrite the whole data structure::
+
+  myapp_config_section1_option11: value11
+  myapp_config_section1_option12: value12
+
+  myapp_config_section1__default:
+    option11: "{{ myapp_config_section1_option11 }}"
+    option12: "{{ myapp_config_section1_option12 }}"
+
+  myapp_config_section1__custom: {}
+
+  myapp_config__default:
+    section1: "{{
+      myapp_config_section1__default.update(myapp_config_section1__custom) }}{{
+      myapp_config_section1__default }}"
+
+  myapp_config__custom: {}
+
+  myapp_config: "{{
+    myapp_config__default.update(myapp_config__custom) }}{{
+    myapp_config__default }}"
+
+Like this, if we want to change the value of ``option11``, we only
+override the variable ``myapp_config_section1_option11``::
+
+  myapp_config_section1_option11: My new value
+
+If we want to add a new option into ``section1``, we add it into the
+variable ``myapp_config_section1__custom``, which is then merged with
+the default list of options::
+
+  myapp_config_section1__custom:
+    option13: value13
+
+And if we want to add a new section, we add it into the variable
+``myapp_config__custom``, which is then merged with the default list of
+sections::
+
+  myapp_config__custom:
+    section2:
+      option21: value21
+
+The above shows an example for INI configuration files only, but the
+same principle applies to all the supported Config Encoders listed
+below.
+
+
+.. _Usage:
+
+Usage
+-----
+
+Config Encoder filters can be used in any Ansible role by adding the
+``config_encoder_filters`` role into the list of dependencies in the
+``meta/main.yml`` file::
+
+  dependencies:
+    - config_encoder_filters
+
+Using the filters directly from a playbook requires adding
+``config_encoder_filters`` into the list of roles::
+
+  - name: My test Play
+    hosts: all
+    roles:
+      - config_encoder_filters
+    tasks:
+      - name: Create config file
+        template:
+          src: my.conf.j2
+          dest: /tmp/my.conf
+
+
+.. _Installation:
+
+Installation
+------------
+
+The role can be downloaded either via the Ansible Galaxy command::
+
+  $ ansible-galaxy install jtyr.config_encoder_filters,master,config_encoder_filters
+
+or via an Ansible Galaxy requirements file::
+
+  $ cat ./requirements.yaml
+  ---
+
+  - src: https://github.com/jtyr/ansible-config_encoder_filters.git
+    name: config_encoder_filters
+  $ ansible-galaxy install -r ./requirements.yaml
+
+or via Git::
+
+  $ git clone https://github.com/jtyr/ansible-config_encoder_filters.git config_encoder_filters
+
+
+.. _`Supported encoders`:
+
+Supported encoders
+------------------
+
+The following is the list of supported Config Encoder filters. Each
+filter requires a special data structure as its input. Each filter also
+has a set of parameters which can modify the behaviour of the filter.
+
+
+.. _encode_apache:
+
+encode_apache
+^^^^^^^^^^^^^
+
+This filter helps to create configuration in the format used by the
+Apache web server. The expected data structure is the following::
+
+  my_apache_vhost:
+    content:
+      - sections:
+          - name: VirtualHost
+            param: "*:80"
+            content:
+              - options:
+                  - DocumentRoot: /www/example1
+                  - ServerName: www.example.com
+                  - ErrorLog: /var/log/httpd/www.example.com-error_log
+                  - CustomLog:
+                      - /var/log/httpd/www.example.com-access_log
+                      - common
+                  - "#": Other directives here ...
+
+The variable starts with ``content``, which can contain a list of
+``sections`` or ``options``. ``sections`` then contains a list of
+individual sections, each of which has the ``name``, ``param`` and
+``content`` parameters. The ``content`` can again contain a list of
+``sections`` or ``options``.
+
+The above variable can be used in the template file like this::
+
+  {{ my_apache_vhost | encode_apache }}
+
+The output of such a template would be::
+
+  <VirtualHost *:80>
+    DocumentRoot /www/example1
+    ServerName www.example.com
+    ErrorLog /var/log/httpd/www.example.com-error_log
+    CustomLog /var/log/httpd/www.example.com-access_log common
+    # "Other directives here ..."
+  </VirtualHost>
+
+The filter can have the following parameters:
+
+- ``convert_bools=false``
+
+  Indicates whether Boolean values presented as a string should be
+  converted to a real Boolean value. For example ``var1: 'True'`` would
+  be represented as a string, but by using ``convert_bools=true`` it
+  will be converted into a Boolean, as if it were defined as
+  ``var1: true``.
+
+- ``convert_nums=false``
+
+  Indicates whether a number presented as a string should be converted
+  to a number. For example ``var1: '123'`` would be represented as a
+  string, but by using ``convert_nums=true`` it will be converted to a
+  number, as if it were defined as ``var1: 123``. It's also possible to
+  use YAML type casting to convert a string to a number (e.g. ``!!int
+  "1234"``, ``!!float "3.14"``).
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
+
+- ``quote_all_nums=false``
+
+  Number values are not quoted by default. This parameter forces
+  quoting of all numbers.
+
+- ``quote_all_strings=false``
+
+  String values are quoted only if they contain a space. This parameter
+  forces quoting of all strings regardless of whether they contain a
+  space or not.
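+
+Filter parameters are passed as keyword arguments in the template. For
+example, a hypothetical invocation which forces quoting of all strings
+and indents the generated block by one extra level could look like
+this::
+
+  {{ my_apache_vhost | encode_apache(quote_all_strings=true, level=1) }}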
+
+
+.. _encode_erlang:
+
+encode_erlang
+^^^^^^^^^^^^^
+
+This filter helps to create configuration in the Erlang format. The
+expected data structure is the following::
+
+  my_rabbitmq_config:
+    - rabbit:
+        - tcp_listeners:
+            - '"127.0.0.1"': 5672
+        - ssl_listeners:
+            - 5671
+        - ssl_options:
+            - cacertfile: /path/to/testca/cacert.pem
+            - certfile: /path/to/server/cert.pem
+            - keyfile: /path/to/server/key.pem
+            - verify: verify_peer
+            - fail_if_no_peer_cert: true
+
+The variable consists of lists of dictionaries. The value of a
+key-value pair can be another list or a simple value like a string or a
+number. Erlang atoms can be enforced by prepending the value with the
+special string specified in the ``atom_value_indicator``.
+
+The above variable can be used in the template file like this::
+
+  {{ my_rabbitmq_config | encode_erlang }}
+
+The output of such a template would be::
+
+  [
+    {rabbit, [
+      {tcp_listeners, [
+        {"127.0.0.1", 5672}
+      ]},
+      {ssl_listeners, [
+        5671
+      ]},
+      {ssl_options, [
+        {cacertfile, "/path/to/testca/cacert.pem"},
+        {certfile, "/path/to/server/cert.pem"},
+        {keyfile, "/path/to/server/key.pem"},
+        {verify, "verify_peer"},
+        {fail_if_no_peer_cert, true}
+      ]}
+    ]}
+  ].
+
+The filter can have the following parameters:
+
+- ``atom_value_indicator=":"``
+
+  The value of this parameter indicates the string which must be
+  prepended to a string value to treat it as an atom value.
+
+- ``convert_bools=false``
+
+  Indicates whether Boolean values presented as a string should be
+  converted to a real Boolean value. For example ``var1: 'True'`` would
+  be represented as a string, but by using ``convert_bools=true`` it
+  will be converted into a Boolean, as if it were defined as
+  ``var1: true``.
+
+- ``convert_nums=false``
+
+  Indicates whether a number presented as a string should be converted
+  to a number. For example ``var1: '123'`` would be represented as a
+  string, but by using ``convert_nums=true`` it will be converted to a
+  number, as if it were defined as ``var1: 123``. It's also possible to
+  use YAML type casting to convert a string to a number (e.g. ``!!int
+  "1234"``, ``!!float "3.14"``).
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
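+
+For example, to emit ``verify_peer`` as an Erlang atom instead of a
+quoted string, the value would be prefixed with the default ``:``
+indicator (a sketch based on the data structure above)::
+
+  - verify: ":verify_peer"
+
+which would then be rendered as ``{verify, verify_peer}``.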
+
+
+.. _encode_haproxy:
+
+encode_haproxy
+^^^^^^^^^^^^^^
+
+This filter helps to create configuration in the format used by
+HAProxy. The expected data structure is the following::
+
+  my_haproxy_config:
+    - global:
+        - daemon
+        - maxconn 256
+    - "# This is the default section"
+    - defaults:
+        - mode http
+        - timeout connect 5000ms
+        - timeout client 50000ms
+        - timeout server 50000ms
+    - frontend http-in:
+        - "# This is the bind address/port"
+        - bind *:80
+        - default_backend servers
+    - backend servers:
+        - server server1 127.0.0.1:8000 maxconn 32
+
+The variable is a list which can contain a simple string value (e.g. a
+comment) or a dictionary which indicates a section.
+
+The above variable can be used in the template file like this::
+
+  {{ my_haproxy_config | encode_haproxy }}
+
+The output of such a template would be::
+
+  global
+    daemon
+    maxconn 256
+
+  # This is the default section
+  defaults
+    mode http
+    timeout connect 5000ms
+    timeout client 50000ms
+    timeout server 50000ms
+
+  frontend http-in
+    # This is the bind address/port
+    bind *:80
+    default_backend servers
+
+  backend servers
+    server server1 127.0.0.1:8000 maxconn 32
+
+The filter can have the following parameters:
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+
+.. _encode_ini:
+
+encode_ini
+^^^^^^^^^^
+
+This filter helps to create configuration in the INI format. The
+expected data structure is the following::
+
+  my_rsyncd_config:
+    uid: nobody
+    gid: nobody
+    use chroot: no
+    max connections: 4
+    syslog facility: local5
+    pid file: /run/rsyncd.pid
+    ftp:
+      path: /srv/ftp
+      comment: ftp area
+
+The variable consists of dictionaries which can be nested. If the value
+of a key-value pair on the first level is of a simple type (string,
+number, boolean), such a pair is considered to be global and gets
+processed first. If the value of a key-value pair on the first level is
+another dictionary, the key is considered to be the name of the section
+and the inner dictionary the properties of the section.
+
+The above variable can be used in the template file like this::
+
+  {{ my_rsyncd_config | encode_ini }}
+
+The output of such a template would be::
+
+  gid=nobody
+  max connections=4
+  pid file=/run/rsyncd.pid
+  syslog facility=local5
+  uid=nobody
+  use chroot=False
+
+  [ftp]
+  comment=ftp area
+  path=/srv/ftp
+
+The filter can have the following parameters:
+
+- ``comment="#"``
+
+  Sign used to comment out lines when ``section_is_comment=true``.
+
+- ``delimiter="="``
+
+  Sign separating the *property* and the *value*. By default it's set
+  to ``'='``, but it can also be set, for example, to ``' = '``.
+
+- ``quote=""``
+
+  Sets the quoting of the value. Use ``quote="'"`` or ``quote='"'``.
+
+- ``section_is_comment=false``
+
+  If this parameter is set to ``true``, the section value will be used
+  as a comment for the following properties of the section.
+
+- ``ucase_prop=false``
+
+  Indicates whether the *property* should be made upper case.
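+
+For example, a hypothetical invocation which pads the delimiter with
+spaces and quotes all values with single quotes could look like this::
+
+  {{ my_rsyncd_config | encode_ini(delimiter=" = ", quote="'") }}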
+
+
+.. _encode_json:
+
+encode_json
+^^^^^^^^^^^
+
+This filter helps to create configuration in the JSON format. The
+expected data structure is the following::
+
+  my_sensu_client_config:
+    client:
+      name: localhost
+      address: 127.0.0.1
+      subscriptions:
+        - test
+
+Because JSON is very similar to YAML, the variable consists of
+dictionaries whose values can be either a simple type (number, string,
+boolean), a list or another dictionary. All can be nested to any number
+of levels.
+
+The above variable can be used in the template file like this::
+
+  {{ my_sensu_client_config | encode_json }}
+
+The output of such a template would be::
+
+  {
+    "client": {
+      "address": "127.0.0.1",
+      "name": "localhost",
+      "subscriptions": [
+        "test"
+      ]
+    }
+  }
+
+The filter can have the following parameters:
+
+- ``convert_bools=false``
+
+  Indicates whether Boolean values presented as a string should be
+  converted to a real Boolean value. For example ``var1: 'True'`` would
+  be represented as a string, but by using ``convert_bools=true`` it
+  will be converted into a Boolean, as if it were defined as
+  ``var1: true``.
+
+- ``convert_nums=false``
+
+  Indicates whether a number presented as a string should be converted
+  to a number. For example ``var1: '123'`` would be represented as a
+  string, but by using ``convert_nums=true`` it will be converted to a
+  number, as if it were defined as ``var1: 123``. It's also possible to
+  use YAML type casting to convert a string to a number (e.g. ``!!int
+  "1234"``, ``!!float "3.14"``).
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
+
+
+.. _encode_logstash:
+
+encode_logstash
+^^^^^^^^^^^^^^^
+
+This filter helps to create configuration in the format used by
+Logstash. The expected data structure is the following::
+
+  my_logstash_config:
+    - :input:
+        - :file:
+            path: /var/log/httpd/access_log
+            start_position: beginning
+    - :filter:
+        - ':if [path] =~ "access"':
+            - :mutate:
+                replace:
+                  type: apache_access
+            - :grok:
+                match:
+                  message: "%{COMBINEDAPACHELOG}"
+            - :date:
+                match:
+                  - timestamp
+                  - dd/MMM/yyyy:HH:mm:ss Z
+        - ':else if [path] =~ "error"':
+            - :mutate:
+                replace:
+                  type: "apache_error"
+        - :else:
+            - :mutate:
+                replace:
+                  type: "random_logs"
+    - :output:
+        - :elasticsearch:
+            hosts:
+              - localhost:9200
+        - :stdout:
+            codec: rubydebug
+
+The variable consists of a list of sections where each section is
+prefixed by a special character specified by the ``section_prefix``
+(``:`` by default). The value of a top-level section can be either
+another section or a dictionary. The value of the dictionary can be a
+simple value, a list or another dictionary.
+
+The above variable can be used in the template file like this::
+
+  {{ my_logstash_config | encode_logstash }}
+
+The output of such a template would be::
+
+  input {
+    file {
+      path => "/var/log/httpd/access_log"
+      start_position => "beginning"
+    }
+  }
+  filter {
+    if [path] =~ "access" {
+      mutate {
+        replace => {
+          "type" => "apache_access"
+        }
+      }
+      grok {
+        match => {
+          "message" => "%{COMBINEDAPACHELOG}"
+        }
+      }
+      date {
+        match => [
+          "timestamp",
+          "dd/MMM/yyyy:HH:mm:ss Z"
+        ]
+      }
+    }
+    else if [path] =~ "error" {
+      mutate {
+        replace => {
+          "type" => "apache_error"
+        }
+      }
+    }
+    else {
+      mutate {
+        replace => {
+          "type" => "random_logs"
+        }
+      }
+    }
+  }
+  output {
+    elasticsearch {
+      hosts => [
+        "localhost:9200"
+      ]
+    }
+    stdout {
+      codec => "rubydebug"
+    }
+  }
+
+The filter can have the following parameters:
+
+- ``convert_bools=false``
+
+  Indicates whether Boolean values presented as a string should be
+  converted to a real Boolean value. For example ``var1: 'True'`` would
+  be represented as a string, but by using ``convert_bools=true`` it
+  will be converted into a Boolean, as if it were defined as
+  ``var1: true``.
+
+- ``convert_nums=false``
+
+  Indicates whether a number presented as a string should be converted
+  to a number. For example ``var1: '123'`` would be represented as a
+  string, but by using ``convert_nums=true`` it will be converted to a
+  number, as if it were defined as ``var1: 123``. It's also possible to
+  use YAML type casting to convert a string to a number (e.g. ``!!int
+  "1234"``, ``!!float "3.14"``).
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
+
+- ``section_prefix=":"``
+
+  This parameter specifies which character will be used to identify the
+  Logstash section.
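+
+The ``section_prefix`` exists so that section names can be told apart
+from option names. If the default ``:`` conflicts with your data, a
+different prefix can be chosen (a hypothetical example)::
+
+  {{ my_logstash_config | encode_logstash(section_prefix='@') }}
+
+in which case the sections in the data structure would be written as
+``- '@input':``, ``- '@filter':`` and ``- '@output':``.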
+
+
+.. _encode_nginx:
+
+encode_nginx
+^^^^^^^^^^^^
+
+This filter helps to create configuration in the format used by the
+Nginx web server. The expected data structure is the following::
+
+  my_nginx_vhost_config:
+    - server:
+        - listen 80
+        - server_name $hostname
+        - "location /":
+            - root /srv/www/myapp
+            - index index.html
+
+As the Nginx configuration is order-sensitive, the whole configuration
+is defined as a nested list. As it would be difficult to recognize how
+many elements each configuration directive has, the list item value is
+not further separated into a key/value dictionary. Every line of the
+configuration is treated either as a key indicating another nested list
+or simply as a string.
+
+The above variable can be used in the template file like this::
+
+  {{ my_nginx_vhost_config | encode_nginx }}
+
+The output of such a template would be::
+
+  server {
+    listen 80;
+    server_name $hostname;
+
+    location / {
+      root /srv/www/myapp;
+      index index.html;
+    }
+  }
+
+The filter can have the following parameters:
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
+
+- ``block_semicolon=false``
+
+  Allows adding a semicolon to the end of each block.
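+
+For example, a hypothetical invocation which terminates each closing
+block with a semicolon could look like this::
+
+  {{ my_nginx_vhost_config | encode_nginx(block_semicolon=true) }}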
+
+
+.. _encode_pam:
+
+encode_pam
+^^^^^^^^^^
+
+This filter helps to create configuration in the format used by the
+Linux Pluggable Authentication Modules (PAM). The expected data
+structure is the following::
+
+  my_system_auth_config:
+    aa:
+      type: auth
+      control: required
+      path: pam_unix.so
+      args:
+        - try_first_pass
+        - nullok
+    bb:
+      type: auth
+      control: optional
+      path: pam_permit.so
+    cc:
+      type: auth
+      control: required
+      path: pam_env.so
+    dd:
+      type: account
+      control: required
+      path: pam_unix.so
+    ee:
+      type: account
+      control: optional
+      path: pam_permit.so
+    ff:
+      type: account
+      control: required
+      path: pam_time.so
+    gg:
+      type: password
+      control: required
+      path: pam_unix.so
+      args:
+        - try_first_pass
+        - nullok
+        - sha512
+        - shadow
+    hh:
+      type: password
+      control: optional
+      path: pam_permit.so
+      args:
+    ii:
+      type: session
+      control: required
+      path: pam_limits.so
+    jj:
+      type: session
+      control: required
+      path: pam_unix.so
+    kk:
+      type: session
+      control: optional
+      path: pam_permit.so
+
+The variable is a dictionary in which each key is a label and each
+value is a PAM rule. The labels are used to order the PAM rules. Using
+labels with an even number of characters makes it possible to insert
+another rule between any two existing rules.
+
+The above variable can be used in the template file like this::
+
+  {{ my_system_auth_config | encode_pam }}
+
+The output of such a template would be::
+
+  auth required pam_unix.so try_first_pass nullok
+  auth optional pam_permit.so
+  auth required pam_env.so
+
+  account required pam_unix.so
+  account optional pam_permit.so
+  account required pam_time.so
+
+  password required pam_unix.so try_first_pass nullok sha512 shadow
+  password optional pam_permit.so
+
+  session required pam_limits.so
+  session required pam_unix.so
+  session optional pam_permit.so
+
+The filter can have the following parameters:
+
+- ``print_label=false``
+
+  Print the labels as comments in the output.
+
+- ``separate_types=true``
+
+  Add a newline between the groups of types.
+
+- ``separator=" "``
+
+  Separator between the collection of tokens.
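+
+For example, to insert a new rule between the rules labeled ``aa`` and
+``bb``, a label which sorts between them can be used. A sketch, reusing
+the ``__custom`` merge pattern shown in the Example_ section
+(``pam_sss.so`` is just an illustrative module)::
+
+  my_system_auth_config__custom:
+    ab:
+      type: auth
+      control: sufficient
+      path: pam_sss.so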
+
+
+.. _encode_toml:
+
+encode_toml
+^^^^^^^^^^^
+
+This filter helps to create configuration in the TOML format. The
+expected data structure is the following::
+
+  my_grafana_ldap_config:
+    verbose_logging: false
+    servers:
+      - host: 127.0.0.1
+        port: 389
+        use_ssl: false
+        ssl_skip_verify: false
+        bind_dn: cn=admin,dc=grafana,dc=org
+        bind_password: grafana
+        search_filter: "(cn=%s)"
+        search_base_dns:
+          - dc=grafana,dc=org
+    servers.attributes:
+      name: givenName
+      surname: sn
+      username: cn
+      member_of: memberOf
+      email: email
+    servers.group_mappings:
+      - group_dn: cn=admins,dc=grafana,dc=org
+        org_role: Admin
+      - group_dn: cn=users,dc=grafana,dc=org
+        org_role: Editor
+      - group_dn: "*"
+        org_role: Viewer
+
+The variable is a dictionary whose values can be either a simple type
+(number, string, boolean), a list or another dictionary. The
+dictionaries and lists can be nested.
+
+The above variable can be used in the template file like this::
+
+  {{ my_grafana_ldap_config | encode_toml }}
+
+The output of such a template would be::
+
+  verbose_logging = false
+
+  [[servers]]
+  bind_dn = "cn=admin,dc=grafana,dc=org"
+  bind_password = "grafana"
+  host = "127.0.0.1"
+  port = 389
+  search_base_dns = ["dc=grafana,dc=org"]
+  search_filter = "(cn=%s)"
+  ssl_skip_verify = false
+  use_ssl = false
+
+  [servers.attributes]
+  email = "email"
+  member_of = "memberOf"
+  name = "givenName"
+  surname = "sn"
+  username = "cn"
+
+  [[servers.group_mappings]]
+  group_dn = "cn=admins,dc=grafana,dc=org"
+  org_role = "Admin"
+
+  [[servers.group_mappings]]
+  group_dn = "cn=users,dc=grafana,dc=org"
+  org_role = "Editor"
+
+  [[servers.group_mappings]]
+  group_dn = "*"
+  org_role = "Viewer"
+
+The filter can have the following parameters:
+
+- ``convert_bools=false``
+
+  Indicates whether Boolean values presented as a string should be
+  converted to a real Boolean value. For example ``var1: 'True'`` would
+  be represented as a string, but by using ``convert_bools=true`` it
+  will be converted into a Boolean, as if it were defined as
+  ``var1: true``.
+
+- ``convert_nums=false``
+
+  Indicates whether a number presented as a string should be converted
+  to a number. For example ``var1: '123'`` would be represented as a
+  string, but by using ``convert_nums=true`` it will be converted to a
+  number, as if it were defined as ``var1: 123``. It's also possible to
+  use YAML type casting to convert a string to a number (e.g. ``!!int
+  "1234"``, ``!!float "3.14"``).
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
+
+- ``quote='"'``
+
+  Sets the quoting of the value. Use ``quote="'"`` or ``quote='"'``.
+
+
+.. _encode_xml:
+
+encode_xml
+^^^^^^^^^^
+
+This filter helps to create configuration in the XML format. The
+expected data structure is the following::
+
+  my_oddjob_config:
+    - oddjobconfig:
+        - service:
+            - ^name: com.redhat.oddjob
+            - object:
+                - ^name: /com/redhat/oddjob
+                - interface:
+                    - ^name: com.redhat.oddjob
+                    - method:
+                        - ^name: listall
+                        - allow:
+                            - ^min_uid: 0
+                            - ^max_uid: 0
+                    - method:
+                        - ^name: list
+                        - allow: ''
+                    - method:
+                        - ^name: quit
+                        - allow:
+                            - ^user: root
+                    - method:
+                        - ^name: reload
+                        - allow:
+                            - ^user: root
+        - include:
+            - ^ignore_missing: "yes"
+            - /etc/oddjobd.conf.d/*.conf
+        - include:
+            - ^ignore_missing: "yes"
+            - /etc/oddjobd-local.conf
+
+The variable can be a list of dictionaries, lists or strings. This
+config encoder does not handle mixed content very well, so the safest
+way to include mixed content is to define it as a string and use the
+parameter ``escape_xml=false``. This config encoder also produces no
+XML declaration. Any XML declaration or DOCTYPE must be part of the
+template file.
+
+The above variable can be used in the template file like this::
+
+  {{ my_oddjob_config | encode_xml }}
+
+The output of such a template would be::
+
+  <oddjobconfig>
+    <service name="com.redhat.oddjob">
+      <object name="/com/redhat/oddjob">
+        <interface name="com.redhat.oddjob">
+          <method name="listall">
+            <allow min_uid="0" max_uid="0"/>
+          </method>
+          <method name="list">
+            <allow></allow>
+          </method>
+          <method name="quit">
+            <allow user="root"/>
+          </method>
+          <method name="reload">
+            <allow user="root"/>
+          </method>
+        </interface>
+      </object>
+    </service>
+    <include ignore_missing="yes">/etc/oddjobd.conf.d/*.conf</include>
+    <include ignore_missing="yes">/etc/oddjobd-local.conf</include>
+  </oddjobconfig>
+
+The filter can have the following parameters:
+
+- ``attribute_sign="^"``
+
+  XML attribute indicator.
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
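+
+For example, if element names in the data happen to start with ``^``, a
+different attribute indicator can be chosen (a hypothetical usage)::
+
+  {{ my_oddjob_config | encode_xml(attribute_sign='@') }}
+
+in which case attributes would be written as ``- '@name': ...`` in the
+data structure.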
+
+
+.. _encode_yaml:
+
+encode_yaml
+^^^^^^^^^^^
+
+This filter helps to create configuration in the YAML format. The
+expected data structure is the following::
+
+  my_mongodb_config:
+    systemLog:
+      destination: file
+      logAppend: true
+      path: /var/log/mongodb/mongod.log
+    storage:
+      dbPath: /var/lib/mongo
+      journal:
+        enabled: true
+    processManagement:
+      fork: true
+      pidFilePath: /var/run/mongodb/mongod.pid
+    net:
+      port: 27017
+      bindIp: 127.0.0.1
+
+The variable is ordinary YAML. The only purpose of this encoder filter
+is to convert the YAML data structure into a string in the template
+file in a unified way compatible with the other config encoders.
+
+The above variable can be used in the template file like this::
+
+  {{ my_mongodb_config | encode_yaml }}
+
+The output of such a template would be::
+
+  net:
+    bindIp: "127.0.0.1"
+    port: 27017
+  processManagement:
+    fork: true
+    pidFilePath: "/var/run/mongodb/mongod.pid"
+  storage:
+    dbPath: "/var/lib/mongo"
+    journal:
+      enabled: true
+  systemLog:
+    destination: "file"
+    logAppend: true
+    path: "/var/log/mongodb/mongod.log"
+
+The filter can have the following parameters:
+
+- ``convert_bools=false``
+
+  Indicates whether Boolean values presented as a string should be
+  converted to a real Boolean value. For example ``var1: 'True'`` would
+  be represented as a string, but by using ``convert_bools=true`` it
+  will be converted into a Boolean, as if it were defined as
+  ``var1: true``.
+
+- ``convert_nums=false``
+
+  Indicates whether a number presented as a string should be converted
+  to a number. For example ``var1: '123'`` would be represented as a
+  string, but by using ``convert_nums=true`` it will be converted to a
+  number, as if it were defined as ``var1: 123``. It's also possible to
+  use YAML type casting to convert a string to a number (e.g. ``!!int
+  "1234"``, ``!!float "3.14"``).
+
+- ``indent="  "``
+
+  Defines the indentation unit.
+
+- ``level=0``
+
+  Indicates the initial level of the indentation. Value ``0`` starts
+  indenting from the beginning of the line. Setting the value higher
+  than ``0`` indents the content by ``indent * level``.
+
+- ``quote='"'``
+
+  Sets the quoting of the value. Use ``quote="'"`` or ``quote='"'``.
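+
+For example, when embedding the generated YAML under a hand-written
+parent key in the template file, the content can be indented by one
+level (a hypothetical usage; whether the first line needs additional
+manual indentation depends on the surrounding template)::
+
+  {{ my_mongodb_config | encode_yaml(level=1) }}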
+
+
+.. _Utilities:
+
+Utilities
+---------
+
+The following is a list of utilities that can be used in conjunction
+with the Config Encoder filters.
+
+
+.. _template_replace:
+
+template_replace
+^^^^^^^^^^^^^^^^
+
+This filter provides an extra templating layer which gets processed
+during the template file processing. That can be useful if it's
+necessary to create repetitive but slightly different definitions
+inside the template file.
+
+The extra templating layer is represented by a template variable which
+contains specially decorated variables that get replaced by their real
+values at the time of the template file processing. The template
+variable can be composed dynamically, which provides extra flexibility
+that would otherwise have to be hardcoded in the template file.
+
+The filter expects the template variable containing the specially
+decorated variables as its input. The filter has one parameter which is
+used to replace the specially decorated variables in the template
+variable.
+
+Let's have a look at an example of such usage::
+
+  # The variable used as the replacement in the template variable
+  my_clients:
+    - host: myclient01
+      jobdefs: Default
+      password: Passw0rd1
+      file_retention: 30 days
+    - host: myclient02
+      jobdefs: HomeOnly
+      password: Passw0rd2
+      file_retention: 90 days
+
+  # The actual template variable used in the template file
+  bacula_director_config_job_client:
+    # First template variable containing the specially decorated variables
+    - template:
+        - Job:
+            - Name = Job-{[{ item['jobdefs'] }]}-{[{ item['host'] }]}
+            - Client = {[{ item['host'] }]}-fd
+            - JobDefs = {[{ item['jobdefs'] }]}
+      # Variable used to replace the specially decorated variables
+      items: "{{ my_clients }}"
+    # Second template and its items
+    - template:
+        - Client:
+            - Name = {[{ item['host'] }]}-fd
+            - Address = {[{ item['host'] }]}
+            - FD Port = 9102
+            - Catalog = Default
+            - Password = {[{ item['password'] }]}
+            - File Retention = {[{ item['file_retention'] }]}
+            - Job Retention = 3 months
+            - AutoPrune = yes
+      items: "{{ my_clients }}"
+
+The above variable can be used together with the ``template_replace``
+filter in the template file (``bacula-dir.conf.j2``) like this::
+
+  {% for record in bacula_director_config_job_client %}
+  {%- for item in record['items'] -%}
+  {{ record['template'] | template_replace(item) | encode_nginx }}{{ "\n" }}
+  {%- endfor -%}
+  {% endfor %}
+
+The template file can be called from the playbook/role like this::
+
+  - name: Configure Bacula Director
+    template:
+      src: bacula-dir.conf.j2
+      dest: /etc/bacula/bacula-dir.conf
+
+And the result of such usage is the following::
+
+  Job {
+    Name = Job-Default-myclient01;
+    Client = myclient01-fd;
+    JobDefs = Default;
+  }
+
+  Job {
+    Name = Job-HomeOnly-myclient02;
+    Client = myclient02-fd;
+    JobDefs = HomeOnly;
+  }
+
+  Client {
+    Name = myclient01-fd;
+    Address = myclient01;
+    FD Port = 9102;
+    Catalog = Default;
+    Password = Passw0rd1;
+    File Retention = 30 days;
+    Job Retention = 3 months;
+    AutoPrune = yes;
+  }
+
+  Client {
+    Name = myclient02-fd;
+    Address = myclient02;
+    FD Port = 9102;
+    Catalog = Default;
+    Password = Passw0rd2;
+    File Retention = 90 days;
+    Job Retention = 3 months;
+    AutoPrune = yes;
+  }
+
+
+.. _License:
+
+License
+-------
+
+GPLv3
+
+
+.. _Author:
+
+Author
+------
+
+Jiri Tyr
diff --git a/playbooks/roles/config-encoders/filter_plugins/config_encoders.py b/playbooks/roles/config-encoders/filter_plugins/config_encoders.py
new file mode 100644
index 00000000000..0980488729c
--- /dev/null
+++ b/playbooks/roles/config-encoders/filter_plugins/config_encoders.py
@@ -0,0 +1,1074 @@
+# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# This file is part of Config Encoder Filters (CEF)
+#
+# CEF is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# CEF is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with CEF. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+Config Encoder Filters
+
+More information: https://github.com/jtyr/ansible-config_encoder_filters
+"""
+
+from ansible import errors
+from copy import copy
+import re
+import six
+
+
+def _str_is_bool(data):
+    """Verify if data is boolean."""
+
+    return re.match(r"^(true|false)$", str(data), flags=re.IGNORECASE)
+
+
+def _str_is_int(data):
+    """Verify if data is integer."""
+
+    return re.match(r"^[-+]?(0|[1-9][0-9]*)$", str(data))
+
+
+def _str_is_float(data):
+    """Verify if data is float."""
+
+    return re.match(
+        r"^[-+]?(0|[1-9][0-9]*)(\.[0-9]*)?(e[-+]?[0-9]+)?$",
+        str(data), flags=re.IGNORECASE)
+
+
+def _str_is_num(data):
+    """Verify if data is either integer or float."""
+
+    return _str_is_int(data) or _str_is_float(data)
+
+
+def _is_num(data):
+    """Verify if data is either int or float.
+
+    Could be replaced by:
+
+    from numbers import Number as number
+    isinstance(data, number)
+
+    but that requires Python v2.6+.
+    """
+
+    return isinstance(data, int) or isinstance(data, float)
+
+
+def _escape(data, quote='"', format=None):
+    """Escape special characters in a string."""
+
+    if format == 'xml':
+        return (
+            str(data).
+            replace('&', '&amp;').
+            replace('<', '&lt;').
+            replace('>', '&gt;'))
+    elif format == 'control':
+        return (
+            str(data).
+            replace('\b', '\\b').
+            replace('\f', '\\f').
+            replace('\n', '\\n').
+            replace('\r', '\\r').
+            replace('\t', '\\t'))
+    elif quote is not None and len(quote):
+        return str(data).replace('\\', '\\\\').replace(quote, "\\%s" % quote)
+    else:
+        return data
+
+
+def encode_apache(
+        data, convert_bools=False, convert_nums=False, indent="  ", level=0,
+        quote_all_nums=False, quote_all_strings=False, block_type='sections'):
+    """Convert Python data structure to Apache format."""
+
+    # Return value
+    rv = ""
+
+    if block_type == 'sections':
+        for c in data['content']:
+            # First check if this section has options
+            if 'options' in c:
+                rv += encode_apache(
+                    c['options'],
+                    convert_bools=convert_bools,
+                    convert_nums=convert_nums,
+                    indent=indent,
+                    level=level+1,
+                    quote_all_nums=quote_all_nums,
+                    quote_all_strings=quote_all_strings,
+                    block_type='options')
+
+            is_empty = False
+
+            # Check if this section has some sub-sections
+            if 'sections' in c:
+                for s in c['sections']:
+                    # Check for empty sub-sections
+                    for i in s['content']:
+                        if (
+                                ('options' in i and len(i['options']) > 0) or
+                                ('sections' in i and len(i['sections']) > 0)):
+                            is_empty = True
+
+                    if is_empty:
+                        rv += "%s<%s " % (indent * level, s['name'])
+
+                        if 'operator' in s:
+                            rv += "%s " % s['operator']
+
+                        if 'param' in s:
+                            rv += encode_apache(
+                                s['param'],
+                                convert_bools=convert_bools,
+                                convert_nums=convert_nums,
+                                indent=indent,
+                                level=level+1,
+                                quote_all_nums=quote_all_nums,
+                                quote_all_strings=quote_all_strings,
+                                block_type='value')
+
+                        rv += ">\n"
+                        rv += encode_apache(
+                            s,
+                            convert_bools=convert_bools,
+                            convert_nums=convert_nums,
+                            indent=indent,
+                            level=level+1,
+                            quote_all_nums=quote_all_nums,
+                            quote_all_strings=quote_all_strings,
+                            block_type='sections')
+                        rv += "%s</%s>\n" % (indent * level, s['name'])
+
+                        # If not last item of the loop
+                        if c['sections'][-1] != s:
+                            rv += "\n"
+
+            if (
+                    data['content'][-1] != c and (
+                        'options' in c and len(c['options']) > 0 or (
+                            'sections' in c and
+                            len(c['sections']) > 0 and
+                            is_empty))):
+                rv += "\n"
+
+    elif block_type == 'options':
+        for o in data:
+            for key, val in sorted(o.items()):
+                rv += "%s%s " % (indent * (level-1), key)
+                rv += encode_apache(
+                    val,
+                    convert_bools=convert_bools,
+                    convert_nums=convert_nums,
+                    
indent=indent, + level=level+1, + quote_all_nums=quote_all_nums, + quote_all_strings=quote_all_strings, + block_type='value') + rv += "\n" + + elif block_type == 'value': + if isinstance(data, bool) or convert_bools and _str_is_bool(data): + # Value is a boolean + + rv += str(data).lower() + + elif ( + _is_num(data) or + (convert_nums and _str_is_num(data))): + # Value is a number + if quote_all_nums: + rv += '"%s"' % data + else: + rv += str(data) + + elif isinstance(data, str): + # Value is a string + if ( + quote_all_strings or + " " in data or + "\t" in data or + "\n" in data or + "\r" in data or + data == ""): + + rv += '"%s"' % _escape(data) + else: + rv += data + + elif isinstance(data, list): + # Value is a list + for v in data: + rv += encode_apache( + v, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1, + quote_all_nums=quote_all_nums, + quote_all_strings=quote_all_strings, + block_type='value') + + # If not last item of the loop + if data[-1] != v: + rv += " " + + return rv + + +def encode_erlang( + data, atom_value_indicator=":", convert_bools=False, + convert_nums=False, indent=" ", level=0): + """Convert Python data structure to Erlang format.""" + + # Return value + rv = "" + + if isinstance(data, dict): + # It's a dict + + rv += "\n" + + for key, val in sorted(data.items()): + rv += "%s{%s," % (indent*level, key) + + if not isinstance(val, dict): + rv += " " + + rv += encode_erlang( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1) + + rv += "}" + elif ( + data == "null" or + _is_num(data) or + isinstance(data, bool) or + (convert_nums and _str_is_num(data)) or + (convert_bools and _str_is_bool(data))): + # It's null, number or boolean + + rv += str(data).lower() + + elif isinstance(data, str): + # It's a string + + atom_len = len(atom_value_indicator) + + if ( + len(data) > atom_len and + data[0:atom_len] == atom_value_indicator): + + # Atom configuration value + rv += data[atom_len:] + else: + rv += '"%s"' % _escape(data) + + else: + # It's a list + + rv += "[" + + for val in data: + if ( + isinstance(val, str) or + _is_num(val)): + rv += "\n%s" % (indent*level) + + rv += encode_erlang( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1) + + if data[-1] == val: + # Last item of the loop + rv += "\n" + else: + rv += "," + + if len(data) > 0: + rv += "%s]" % (indent * (level-1)) + else: + rv += "]" + + if level == 0: + rv += ".\n" + + return rv + + +def encode_haproxy(data, indent=" "): + """Convert Python data structure to HAProxy format.""" + + # Return value + rv = "" + # Indicates first loop + first = True + # Indicates whether the previous section was a comment + prev_comment = False + + for section in data: + if first: + first = False + elif prev_comment: + prev_comment = False + else: + # Print empty line between sections + rv += "\n" + + if isinstance(section, dict): + # It's a section + rv += "%s\n" % list(section.keys())[0] + + # Process all parameters of the section + for param in list(section.values())[0]: + rv += "%s%s\n" % (indent, param) + else: + # It's a comment of a parameter + rv += "%s\n" % section + prev_comment = True + + return rv + + +def encode_ini( + data, comment="#", delimiter=" = ", quote="", section_is_comment=False, + ucase_prop=False): + """Convert Python data structure to INI format.""" + + # Return value + rv = "" + + # First process all standalone properties + for prop, val in sorted(data.items()): 
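+        # Scalar and list values are rendered by this first pass; dict values
+        # are sections and are skipped until the second pass below, so that
+        # standalone keys always come out above any [section] header.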
+ if ucase_prop: + prop = prop.upper() + + vals = [] + + if isinstance(val, list): + vals = val + elif not isinstance(val, dict): + vals = [val] + + for item in vals: + if item is not None: + rv += "%s%s%s%s%s\n" % ( + prop, delimiter, quote, _escape(item, quote), quote) + + # Then process all sections + for section, props in sorted(data.items()): + if isinstance(props, dict): + if rv != "": + rv += "\n" + + if section_is_comment: + rv += "%s %s\n" % (comment, section) + else: + rv += "[%s]\n" % (section) + + # Let process all section options as standalone properties + rv += encode_ini( + props, + delimiter=delimiter, + quote=quote, + section_is_comment=section_is_comment, + ucase_prop=ucase_prop) + + return rv + + +def encode_json( + data, convert_bools=False, convert_nums=False, indent=" ", level=0): + """Convert Python data structure to JSON format.""" + + # Return value + rv = "" + + if isinstance(data, dict): + # It's a dict + + rv += "{" + + if len(data) > 0: + rv += "\n" + + items = sorted(data.items()) + + for key, val in items: + rv += '%s"%s": ' % (indent * (level+1), key) + rv += encode_json( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1) + + # Last item of the loop + if items[-1] == (key, val): + rv += "\n" + else: + rv += ",\n" + + if len(data) > 0: + rv += "%s}" % (indent * level) + else: + rv += "}" + + if level == 0: + rv += "\n" + + elif ( + data == "null" or + _is_num(data) or + (convert_nums and _str_is_num(data)) or + (convert_bools and _str_is_bool(data))): + # It's a number, null or boolean + + rv += str(data).lower() + + elif isinstance(data, str): + # It's a string + + rv += '"%s"' % _escape(_escape(data), format='control') + + else: + # It's a list + + rv += "[" + + if len(data) > 0: + rv += "\n" + + for val in data: + rv += indent * (level+1) + rv += encode_json( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1) + + # Last item of the loop + if data[-1] == val: + rv += "\n" + else: + rv += ",\n" + + if len(data) > 0: + rv += "%s]" % (indent * level) + else: + rv += "]" + + return rv + + +def encode_logstash( + data, convert_bools=False, convert_nums=False, indent=" ", level=0, + prevtype="", section_prefix=":"): + """Convert Python data structure to Logstash format.""" + + # Return value + rv = "" + + if isinstance(data, dict): + # The item is a dict + + if prevtype in ('value', 'value_hash', 'array'): + rv += "{\n" + + items = sorted(data.items()) + + for key, val in items: + if key[0] == section_prefix: + rv += "%s%s {\n" % (indent * level, key[1:]) + rv += encode_logstash( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1, + prevtype='block') + + # Last item of the loop + if items[-1] == (key, val): + if ( + isinstance(val, str) or + _is_num(val) or + isinstance(val, bool) or ( + isinstance(val, dict) and + list(val.keys())[0][0] != section_prefix)): + rv += "\n%s}\n" % (indent * level) + else: + rv += "%s}\n" % (indent * level) + else: + rv += indent * level + + if prevtype == 'value_hash': + rv += '"%s" => ' % key + else: + rv += "%s => " % key + + rv += encode_logstash( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1, + prevtype=( + 'value_hash' if isinstance(val, dict) else 'value')) + + if ( + items[-1] != (key, val) and ( + isinstance(val, str) or + _is_num(val) or + isinstance(val, bool))): + rv += "\n" + + if prevtype in ('value', 'value_hash', 
'array'): + rv += "\n%s}" % (indent * (level-1)) + + if prevtype in ('value', 'value_array'): + rv += "\n" + + elif ( + _is_num(data) or + isinstance(data, bool) or + (convert_nums and _str_is_num(data)) or + (convert_bools and _str_is_bool(data))): + # It's number or boolean + + rv += str(data).lower() + + elif isinstance(data, str): + # It's a string + + rv += '"%s"' % _escape(data) + + else: + # It's a list + + for val in data: + if isinstance(val, dict) and list(val.keys())[0][0] == section_prefix: + # Value is a block + + rv += encode_logstash( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level, + prevtype='block') + else: + # First item of the loop + if data[0] == val: + rv += "[\n" + + rv += indent * level + rv += encode_logstash( + val, + convert_bools=convert_bools, + convert_nums=convert_nums, + indent=indent, + level=level+1, + prevtype='array') + + # Last item of the loop + if data[-1] == val: + rv += "\n%s]" % (indent * (level-1)) + else: + rv += ",\n" + + return rv + + +def encode_nginx(data, indent=" ", level=0, block_semicolon=False): + """Convert Python data structure to Nginx format.""" + + # Return value + rv = "" + # Indicates the item type [section|line] + item_type = "" + + for item in data: + if isinstance(item, dict): + # Section + if item_type in ('section', 'line'): + rv += "\n" + + rv += "%s%s {\n" % (level*indent, list(item.keys())[0]) + rv += encode_nginx( + list(item.values())[0], + level=level+1, + block_semicolon=block_semicolon) + rv += "%s}%s\n" % (level*indent, ';' if block_semicolon else '') + + item_type = 'section' + + elif isinstance(item, str): + # Normal line + if item_type == 'section': + rv += "\n" + + item_type = 'line' + + rv += "%s%s" % (level*indent, item) + + # Do not finish comments with semicolon + if item.startswith("# "): + rv += "\n" + else: + rv += ";\n" + + else: + raise errors.AnsibleFilterError( + "Unexpected data type: %s" % (type(item))) + + return rv + + +def encode_pam( + data, print_label=False, separate_types=True, separator=" "): + """Convert Python data structure to PAM format.""" + + # Return value + rv = "" + # Remember previous type to make newline between type blocks + prev_type = None + + for label, rule in sorted(data.items()): + if separate_types: + # Add extra newline to separate blocks of the same type + if prev_type is not None and prev_type != rule['type']: + rv += "\n" + + prev_type = rule['type'] + + if print_label: + rv += "# %s\n" % label + + if 'service' in rule: + rv += "%s%s" % (rule['service'], separator) + + if 'silent' in rule and rule['silent']: + rv += '-' + + rv += "%s%s" % (rule['type'], separator) + + if isinstance(rule['control'], list): + rv += "[%s]%s" % ( + " ".join( + ["=".join(map(str, k)) for k in [list(x.items())[0] for x in rule['control']]]), + separator) + else: + rv += "%s%s" % (rule['control'], separator) + + rv += rule['path'] + + if 'args' in rule and rule['args']: + rv += separator + + for i, arg in enumerate(rule['args']): + if i > 0: + rv += ' ' + + if isinstance(arg, dict): + rv += "=".join(map(str, list(arg.items())[0])) + else: + rv += arg + + rv += "\n" + + return rv + + +def encode_toml( + data, convert_bools=False, convert_nums=False, first=True, + indent=" ", level=0, prevkey="", prevtype="", quote='"'): + """Convert Python data structure to TOML format.""" + + # Return value + rv = "" + + if isinstance(data, dict): + # It's a dict + + # First process all standalone strings, numbers, booleans and lists + for key, val in 
sorted(data.items()):
+        if (
+                isinstance(val, str) or
+                _is_num(val) or
+                isinstance(val, bool) or (
+                    isinstance(val, list) and
+                    len(val) > 0 and
+                    not isinstance(val[0], dict))):
+            # The value is string, number, boolean or list
+
+            rv += "%s%s = " % (indent * level, key)
+            rv += encode_toml(
+                val,
+                convert_bools=convert_bools,
+                convert_nums=convert_nums,
+                first=first,
+                indent=indent,
+                level=level,
+                prevkey=prevkey)
+
+            first = False
+
+    # Then process all data structures
+    for key, val in sorted(data.items()):
+        if (
+                isinstance(val, dict) or
+                isinstance(val, list) and isinstance(val[0], dict)):
+
+            # Values for the next recursive call
+            tmp_prevkey = prevkey
+            tmp_level = level
+
+            if isinstance(val, dict):
+                # The val is a dict
+                if prevkey != "" and prevkey != key:
+                    tmp_level += 1
+
+                if re.match(r'^[a-zA-Z0-9_-]+$', key) is None:
+                    key = '"%s"' % key
+
+                if prevkey == "":
+                    tmp_prevkey = key
+                else:
+                    tmp_prevkey = "%s.%s" % (prevkey, key)
+
+                if not first:
+                    rv += "\n"
+
+                rv += "%s[%s]\n" % (indent * tmp_level, tmp_prevkey)
+            elif isinstance(val[0], dict):
+                # The val is a table
+                if re.match(r'^[a-zA-Z0-9_-]+$', key) is None:
+                    key = '"%s"' % key
+
+                if prevkey == "":
+                    tmp_prevkey = key
+                else:
+                    tmp_prevkey = "%s.%s" % (prevkey, key)
+
+                tmp_level += 1
+
+            rv += encode_toml(
+                val,
+                convert_bools=convert_bools,
+                convert_nums=convert_nums,
+                first=first,
+                indent=indent,
+                level=tmp_level,
+                prevkey=tmp_prevkey)
+
+            first = False
+
+    elif (
+            _is_num(data) or
+            isinstance(data, bool) or
+            (convert_nums and _str_is_num(data)) or
+            (convert_bools and _str_is_bool(data))):
+        # It's number or boolean
+
+        rv += str(data).lower()
+
+        if prevtype != 'list':
+            rv += "\n"
+
+    elif isinstance(data, str):
+        # It's a string
+
+        rv += "%s%s%s" % (
+            quote, _escape(data, quote), quote)
+
+        if prevtype != 'list':
+            rv += "\n"
+
+    else:
+        # It's a list
+
+        if isinstance(data[0], dict):
+            for d in data:
+                rv += "\n%s[[%s]]\n" % (indent * level, prevkey)
+                rv += encode_toml(
+                    d,
+                    convert_bools=convert_bools,
+                    convert_nums=convert_nums,
+                    first=first,
+                    indent=indent,
+                    level=level)
+        else:
+            rv += "["
+
+            for d in data:
+                rv += encode_toml(
+                    d,
+                    convert_bools=convert_bools,
+                    convert_nums=convert_nums,
+                    first=first,
+                    indent=indent,
+                    level=level,
+                    prevtype='list')
+
+                # Last item of the loop
+                if data[-1] != d:
+                    rv += ", "
+
+            rv += "]"
+
+            if prevtype != 'list':
+                rv += "\n"
+
+    return rv
+
+
+def encode_xml(
+        data, attribute_sign="^", escape_xml=True, indent=" ", level=0):
+    """Convert Python data structure to XML format."""
+
+    # Return value
+    rv = ""
+
+    if isinstance(data, list):
+        # Process anything that's not an attribute
+        for item in data:
+            if (
+                    not (
+                        isinstance(item, dict) and
+                        list(item.keys())[0].startswith(attribute_sign))):
+                rv += encode_xml(
+                    item,
+                    attribute_sign=attribute_sign,
+                    indent=indent,
+                    level=level,
+                    escape_xml=escape_xml)
+    elif isinstance(data, dict):
+        # It's either an attribute or an element
+
+        key, val = list(data.items())[0]
+
+        if key.startswith(attribute_sign):
+            # Process attribute
+            rv += ' %s="%s"' % (key[1:], _escape(val))
+        else:
+            # Process element
+            rv = '%s<%s' % (level*indent, key)
+
+            # Check if there are any attributes
+            if isinstance(val, list):
+                num_attrs = 0
+
+                for item in val:
+                    if (
+                            isinstance(item, dict) and
+                            list(item.keys())[0].startswith(attribute_sign)):
+                        num_attrs += 1
+                        rv += encode_xml(
+                            item,
+                            attribute_sign=attribute_sign,
+                            indent=indent,
+                            level=level)
+
+            if val == '' or (isinstance(val, list)
and num_attrs == len(val)):
+                # Close the element as empty
+                rv += " />\n"
+            else:
+                # Close the element as normal
+                rv += ">"
+
+                # Check if the value is text
+                val_not_text = False
+
+                if isinstance(val, list):
+                    # Check if it contains only attributes and a text value
+                    for item in val:
+                        if (
+                                isinstance(item, dict) and
+                                not list(item.keys())[0].startswith(attribute_sign)):
+                            val_not_text = True
+                            break
+                elif isinstance(val, dict):
+                    val_not_text = True
+
+                if val_not_text:
+                    rv += "\n"
+
+                # Process inner content of the element
+                rv += encode_xml(
+                    val,
+                    attribute_sign=attribute_sign,
+                    indent=indent,
+                    level=level+1,
+                    escape_xml=escape_xml)
+
+                if val_not_text:
+                    rv += level*indent
+
+                rv += "</%s>\n" % key
+    else:
+        # It's a string
+
+        rv += "%s" % _escape(data, format=('xml' if escape_xml else None))
+
+    return rv
+
+
+def encode_yaml(
+        data, convert_bools=False, convert_nums=False, indent=" ", level=0,
+        quote='"', skip_indent=False):
+    """Convert Python data structure to YAML format."""
+
+    # Return value
+    rv = ""
+
+    if isinstance(data, dict):
+        # It's a dictionary
+
+        if len(list(data.keys())) == 0:
+            rv += "{}\n"
+        else:
+            for i, (key, val) in enumerate(sorted(data.items())):
+                # Skip indentation only for the first pair
+                rv += "%s%s:" % ("" if i == 0 and skip_indent else level*indent, key)
+
+                if isinstance(val, dict) and len(list(val.keys())) == 0:
+                    rv += " {}\n"
+                else:
+                    if (
+                            isinstance(val, dict) or (
+                                isinstance(val, list) and
+                                len(val) != 0)):
+                        rv += "\n"
+                    else:
+                        rv += " "
+
+                    rv += encode_yaml(
+                        val,
+                        convert_bools=convert_bools,
+                        convert_nums=convert_nums,
+                        indent=indent,
+                        level=level+1,
+                        quote=quote)
+
+    elif isinstance(data, list):
+        # It's a list
+
+        if len(data) == 0:
+            rv += "[]\n"
+        else:
+            for item in data:
+                list_indent = "%s- " % (level*indent)
+
+                rv += "%s%s" % (list_indent, encode_yaml(
+                    item,
+                    convert_bools=convert_bools,
+                    convert_nums=convert_nums,
+                    indent=indent,
+                    level=level+1,
+                    quote=quote,
+                    skip_indent=True))
+
+    elif (
+            data == "null" or
+            isinstance(data, bool) or
+            (convert_bools and _str_is_bool(data))):
+        # It's a boolean
+
+        rv += "%s\n" % str(data).lower()
+
+    elif (
+            _is_num(data) or
+            (convert_nums and _str_is_num(data))):
+        # It's a number
+
+        rv += "%s\n" % str(data)
+
+    else:
+        # It's a string
+
+        rv += "%s%s%s\n" % (quote, _escape(data, quote), quote)
+
+    return rv
+
+
+def __eval_replace(match):
+    """Evaluate the real value of the variable specified as a string."""
+
+    ret = '__item'
+    ret += ''.join(match.groups()[1:])
+
+    # Try to evaluate the value of the special string
+    try:
+        ret = eval(ret)
+    except Exception:
+        # Return empty string if something went wrong
+        ret = ''
+
+    return str(ret)
+
+
+def template_replace(data, replacement):
+    """Replace special template decorated variable with its real value."""
+
+    # Make the replacement variable visible for the __eval_replace function
+    global __item
+    __item = replacement
+
+    # Clone the data to keep the original untouched
+    local_data = copy(data)
+
+    # Walk through the data structure and try to replace all special strings
+    if isinstance(local_data, list):
+        local_data = [template_replace(x, replacement) for x in local_data]
+    elif isinstance(local_data, dict):
+        for key, val in local_data.items():
+            local_data[key] = template_replace(val, replacement)
+    elif isinstance(local_data, str):
+        # Replace the special string by its evaluated value
+        p = re.compile(r'\{\[\{\s*(\w+)([^}\s]+|)\s*\}\]\}')
+        local_data = p.sub(__eval_replace, local_data)
+
+    return
local_data
+
+
+class FilterModule:
+    """Ansible encoder Jinja2 filters."""
+
+    def filters(self):
+        """Expose filters to ansible."""
+
+        return {
+            'encode_apache': encode_apache,
+            'encode_erlang': encode_erlang,
+            'encode_haproxy': encode_haproxy,
+            'encode_ini': encode_ini,
+            'encode_json': encode_json,
+            'encode_logstash': encode_logstash,
+            'encode_nginx': encode_nginx,
+            'encode_pam': encode_pam,
+            'encode_toml': encode_toml,
+            'encode_xml': encode_xml,
+            'encode_yaml': encode_yaml,
+            'template_replace': template_replace,
+        }
diff --git a/playbooks/roles/config-encoders/meta/main.yml b/playbooks/roles/config-encoders/meta/main.yml
new file mode 100644
index 00000000000..23d65c7ef45
--- /dev/null
+++ b/playbooks/roles/config-encoders/meta/main.yml
@@ -0,0 +1,2 @@
+---
+dependencies: []
diff --git a/playbooks/roles/course_authoring/defaults/main.yml b/playbooks/roles/course_authoring/defaults/main.yml
new file mode 100644
index 00000000000..c2b03296969
--- /dev/null
+++ b/playbooks/roles/course_authoring/defaults/main.yml
@@ -0,0 +1,3 @@
+course_authoring_env_extra:
+  STUDIO_BASE_URL: 'https://studio-{{common_deploy_hostname}}/'
+
diff --git a/playbooks/roles/course_authoring/meta/main.yml b/playbooks/roles/course_authoring/meta/main.yml
new file mode 100644
index 00000000000..60343ce5141
--- /dev/null
+++ b/playbooks/roles/course_authoring/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+  - common
+  - nginx
\ No newline at end of file
diff --git a/playbooks/roles/course_authoring/tasks/main.yml b/playbooks/roles/course_authoring/tasks/main.yml
new file mode 100644
index 00000000000..bcf0fb3ae72
--- /dev/null
+++ b/playbooks/roles/course_authoring/tasks/main.yml
@@ -0,0 +1,5 @@
+- name: Build Course Authoring MFE
+  include_role:
+    name: mfe
+  vars:
+    MFE_ENVIRONMENT_EXTRA: '{{ course_authoring_env_extra | default(MFE_DEPLOY_ENVIRONMENT_EXTRA) }}'
\ No newline at end of file
diff --git a/playbooks/roles/credentials/defaults/main.yml b/playbooks/roles/credentials/defaults/main.yml
new file mode 100644
index 00000000000..e965d975e03
--- /dev/null
+++ b/playbooks/roles/credentials/defaults/main.yml
@@ -0,0 +1,186 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Defaults for role credentials
+#
+
+
+#
+# vars are namespace with the module name.
+#
+credentials_service_name: 'credentials'
+
+credentials_environment:
+  CREDENTIALS_CFG: '{{ COMMON_CFG_DIR }}/{{ credentials_service_name }}.yml'
+
+credentials_gunicorn_port: 8150
+CREDENTIALS_NODE_VERSION: '16.14.0'
+CREDENTIALS_NPM_VERSION: '8.5.5'
+
+#
+# OS packages
+#
+
+credentials_debian_pkgs:
+  # Needed to manipulate images.
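+  # (libjpeg supplies the JPEG headers that Pillow builds against.)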
+ - libjpeg8-dev + +credentials_release_specific_debian_pkgs: + xenial: + - libpng12-dev + bionic: + - libpng-dev + focal: + - libpng-dev + +credentials_redhat_pkgs: [] + +CREDENTIALS_NGINX_PORT: '1{{ credentials_gunicorn_port }}' +CREDENTIALS_SSL_NGINX_PORT: '4{{ credentials_gunicorn_port }}' + +CREDENTIALS_DEFAULT_DB_NAME: 'credentials' +CREDENTIALS_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +CREDENTIALS_MYSQL_USER: 'credentials001' +CREDENTIALS_MYSQL_PASSWORD: 'password' +CREDENTIALS_MYSQL_CONN_MAX_AGE: 60 + +CREDENTIALS_MEMCACHE: [ 'memcache' ] + +CREDENTIALS_DJANGO_SETTINGS_MODULE: 'credentials.settings.production' +CREDENTIALS_DOMAIN: 'credentials' +CREDENTIALS_URL_ROOT: 'http://{{ CREDENTIALS_DOMAIN }}:{{ CREDENTIALS_NGINX_PORT }}' +CREDENTIALS_LOGOUT_URL: '{{ CREDENTIALS_URL_ROOT }}/logout/' + +CREDENTIALS_SECRET_KEY: 'SET-ME-TO-A-UNIQUE-LONG-RANDOM-STRING' +CREDENTIALS_LANGUAGE_CODE: 'en' +CREDENTIALS_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +# Used to automatically configure OAuth2 Client +CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'credentials-sso-key' +CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'credentials-sso-secret' +CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'credentials-backend-service-key' +CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'credentials-backend-service-secret' +CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +CREDENTIALS_SERVICE_USER: 'credentials_service_user' + +CREDENTIALS_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ credentials_service_name }}' + +# TODO: Let edx_django_service manage CREDENTIALS_STATIC_ROOT in phase 2. +CREDENTIALS_STATIC_ROOT: '{{ CREDENTIALS_DATA_DIR }}/staticfiles' + +CREDENTIALS_MEDIA_ROOT: '{{ CREDENTIALS_DATA_DIR }}/media' +CREDENTIALS_MEDIA_URL: '/media/' +CREDENTIALS_STATIC_URL: '/static/' + +CREDENTIALS_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ CREDENTIALS_MEDIA_ROOT }}' + MEDIA_URL: '{{ CREDENTIALS_MEDIA_URL }}' + +# NOTE: This service is one of the few that stores its static files on S3. We use a backend that adds a hash to the +# filename to avoid overwriting older files, which may be in use, with newer files during deployments. See +# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#manifeststaticfilesstorage for more information. +CREDENTIALS_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' + +# NOTE: This only needs to be overridden when using non-local storage. Otherwise, the edx_django_service play will +# properly configure local storage of media and static files. 
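+# A hypothetical non-local override could look like this (values are
+# illustrative only, using the django-storages S3 backend):
+#
+#   CREDENTIALS_FILE_STORAGE_BACKEND:
+#     DEFAULT_FILE_STORAGE: 'storages.backends.s3boto3.S3Boto3Storage'
+#     AWS_STORAGE_BUCKET_NAME: 'my-credentials-media'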
+CREDENTIALS_FILE_STORAGE_BACKEND: {} + + +CREDENTIALS_CORS_ORIGIN_ALLOW_ALL: false +CREDENTIALS_CORS_ORIGIN_WHITELIST_DEFAULT: + - '{{ CREDENTIALS_DOMAIN }}' + - '{{ CREDENTIALS_LEARNER_RECORD_MFE_HOSTNAME }}' + +CREDENTIALS_CORS_ORIGIN_WHITELIST_EXTRA: [] +CREDENTIALS_CORS_ORIGIN_WHITELIST: '{{ CREDENTIALS_CORS_ORIGIN_WHITELIST_DEFAULT + CREDENTIALS_CORS_ORIGIN_WHITELIST_EXTRA }}' + +CREDENTIALS_VERSION: 'master' + +CREDENTIALS_GUNICORN_EXTRA: '' + +CREDENTIALS_EXTRA_APPS: [] + +CREDENTIALS_EXTRA_REQUIREMENTS: [] + +CREDENTIALS_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +CREDENTIALS_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +CREDENTIALS_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + +credentials_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ CREDENTIALS_CERTIFICATE_LANGUAGES }}' + CREDENTIALS_SERVICE_USER: '{{ CREDENTIALS_SERVICE_USER }}' + FILE_STORAGE_BACKEND: '{{ CREDENTIALS_FILE_STORAGE_BACKEND }}' + LANGUAGE_COOKIE_NAME: '{{ CREDENTIALS_LANGUAGE_COOKIE_NAME }}' + USE_LEARNER_RECORD_MFE: '{{ CREDENTIALS_USE_LEARNER_RECORD_MFE }}' + LEARNER_RECORD_MFE_RECORDS_PAGE_URL: "https://{{ CREDENTIALS_LEARNER_RECORD_MFE_HOSTNAME }}/" + CSRF_COOKIE_SECURE: "{{ CREDENTIALS_CSRF_COOKIE_SECURE }}" + CSRF_TRUSTED_ORIGINS: "{{ CREDENTIALS_CSRF_TRUSTED_ORIGINS }}" + USERNAME_REPLACEMENT_WORKER: "{{ CREDENTIALS_USERNAME_REPLACEMENT_WORKER }}" + VERIFIABLE_CREDENTIALS: "{{ CREDENTIALS_VERIFIABLE_CREDENTIALS }}" + +# See edx_django_service_automated_users for an example of what this should be +CREDENTIALS_AUTOMATED_USERS: {} + +credentials_create_demo_data: false + +# NOTE: These variables are only needed to create the demo site (e.g. for sandboxes) +CREDENTIALS_LMS_URL_ROOT: !!null +CREDENTIALS_DISCOVERY_API_URL: !!null + +CREDENTIALS_CSRF_COOKIE_SECURE: false +CREDENTIALS_CSRF_TRUSTED_ORIGINS: + - "{{ CREDENTIALS_LEARNER_RECORD_MFE_HOSTNAME }}" + +CREDENTIALS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +credentials_post_migrate_commands: + - command: './manage.py create_or_update_site --site-id=1 --site-domain={{ CREDENTIALS_DOMAIN }} --site-name="Open edX" --platform-name="Open edX" --company-name="Open edX" --lms-url-root={{ CREDENTIALS_LMS_URL_ROOT }} --catalog-api-url={{ CREDENTIALS_DISCOVERY_API_URL }} --tos-url={{ CREDENTIALS_LMS_URL_ROOT }}/tos --privacy-policy-url={{ CREDENTIALS_LMS_URL_ROOT }}/privacy --homepage-url={{ CREDENTIALS_LMS_URL_ROOT }} --certificate-help-url={{ CREDENTIALS_LMS_URL_ROOT }}/faq --records-help-url={{ CREDENTIALS_LMS_URL_ROOT }}/faq --theme-name=openedx' + when: '{{ credentials_create_demo_data }}' + + + +# Remote config +CREDENTIALS_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +CREDENTIALS_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +CREDENTIALS_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +CREDENTIALS_ENABLE_ADMIN_URLS_RESTRICTION: false +CREDENTIALS_ADMIN_URLS: + - admin + +CREDENTIALS_USE_LEARNER_RECORD_MFE: false +CREDENTIALS_LEARNER_RECORD_MFE_HOSTNAME: "learner-record.mfe.CHANGE-ME" + +# NOTE: Optional Verifiable Credentials feature +# Documentation can be found at +# https://edx-credentials.readthedocs.io/en/latest/verifiable_credentials/overview.html +CREDENTIALS_VERIFIABLE_CREDENTIALS: + DEFAULT_DATA_MODELS: + - "credentials.apps.verifiable_credentials.composition.verifiable_credentials.VerifiableCredentialsDataModel" + - "credentials.apps.verifiable_credentials.composition.open_badges.OpenBadgesDataModel" + DEFAULT_STORAGES: + - 
"credentials.apps.verifiable_credentials.storages.learner_credential_wallet.LCWallet" + DEFAULT_ISSUER: + NAME: "Default (system-wide)" + ID: "generate-me-with-didkit-lib" + KEY: "generate-me-with-didkit-lib" + DEFAULT_ISSUANCE_REQUEST_SERIALIZER: "credentials.apps.verifiable_credentials.issuance.serializers.IssuanceLineSerializer" + DEFAULT_RENDERER: "credentials.apps.verifiable_credentials.issuance.renderers.JSONLDRenderer" + STATUS_LIST_STORAGE: "credentials.apps.verifiable_credentials.storages.status_list.StatusList2021" + STATUS_LIST_DATA_MODEL: "credentials.apps.verifiable_credentials.composition.status_list.StatusListDataModel" + STATUS_LIST_LENGTH: 10000 diff --git a/playbooks/roles/credentials/meta/main.yml b/playbooks/roles/credentials/meta/main.yml new file mode 100644 index 00000000000..26436591839 --- /dev/null +++ b/playbooks/roles/credentials/meta/main.yml @@ -0,0 +1,54 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role credentials +# +dependencies: + - role: edx_django_service + edx_django_service_use_python38: true + edx_django_service_version: '{{ CREDENTIALS_VERSION }}' + edx_django_service_name: '{{ credentials_service_name }}' + edx_django_service_config_overrides: '{{ credentials_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: "{{ credentials_debian_pkgs + credentials_release_specific_debian_pkgs[ansible_distribution_release] }}" + edx_django_service_gunicorn_port: '{{ credentials_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ CREDENTIALS_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ credentials_environment }}' + edx_django_service_gunicorn_extra: '{{ CREDENTIALS_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ CREDENTIALS_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ CREDENTIALS_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ CREDENTIALS_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ CREDENTIALS_SECRET_KEY }}' + edx_django_service_staticfiles_storage: '{{ CREDENTIALS_STATICFILES_STORAGE }}' + edx_django_service_media_storage_backend: '{{ CREDENTIALS_MEDIA_STORAGE_BACKEND }}' + edx_django_service_memcache: '{{ CREDENTIALS_MEMCACHE }}' + edx_django_service_default_db_host: '{{ CREDENTIALS_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ CREDENTIALS_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ CREDENTIALS_MYSQL_USER }}' + edx_django_service_db_password: '{{ CREDENTIALS_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ CREDENTIALS_MYSQL_CONN_MAX_AGE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_social_auth_redirect_is_https: '{{ CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' + edx_django_service_extra_apps: '{{ CREDENTIALS_EXTRA_APPS }}' + edx_django_service_extra_requirements: '{{ 
CREDENTIALS_EXTRA_REQUIREMENTS }}'
+    edx_django_service_session_expire_at_browser_close: '{{ CREDENTIALS_SESSION_EXPIRE_AT_BROWSER_CLOSE }}'
+    edx_django_service_node_version: '{{ CREDENTIALS_NODE_VERSION }}'
+    edx_django_service_npm_version: '{{ CREDENTIALS_NPM_VERSION }}'
+    edx_django_service_automated_users: '{{ CREDENTIALS_AUTOMATED_USERS }}'
+    edx_django_service_cors_whitelist: '{{ CREDENTIALS_CORS_ORIGIN_WHITELIST }}'
+    edx_django_service_post_migrate_commands: '{{ credentials_post_migrate_commands }}'
+    edx_django_service_enable_newrelic_distributed_tracing: '{{ CREDENTIALS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}'
+    edx_django_service_decrypt_config_enabled: '{{ CREDENTIALS_DECRYPT_CONFIG_ENABLED }}'
+    edx_django_service_copy_config_enabled: '{{ CREDENTIALS_COPY_CONFIG_ENABLED }}'
+    EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ CREDENTIALS_ENABLE_ADMIN_URLS_RESTRICTION }}'
+    EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ CREDENTIALS_ADMIN_URLS }}'
diff --git a/playbooks/roles/credentials/tasks/main.yml b/playbooks/roles/credentials/tasks/main.yml
new file mode 100644
index 00000000000..5816e28f256
--- /dev/null
+++ b/playbooks/roles/credentials/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role credentials
+#
+# Overview: This role's tasks come from edx_django_service.
+#
+#
+# Dependencies:
+#
+#
+# Example play:
+#
+#
diff --git a/playbooks/roles/datadog-uninstall/defaults/main.yml b/playbooks/roles/datadog-uninstall/defaults/main.yml
new file mode 100644
index 00000000000..c6b06f891e0
--- /dev/null
+++ b/playbooks/roles/datadog-uninstall/defaults/main.yml
@@ -0,0 +1,2 @@
+datadog_uninstall_apt_key: "0x382E94DE"
+DATADOG_UNINSTALL_UBUNTU_APT_KEYSERVER: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search="
diff --git a/playbooks/roles/datadog-uninstall/tasks/main.yml b/playbooks/roles/datadog-uninstall/tasks/main.yml
new file mode 100644
index 00000000000..d6e6d3d2fa7
--- /dev/null
+++ b/playbooks/roles/datadog-uninstall/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove apt key for datadog
+  apt_key:
+    id: "382E94DE"
+    url: "{{ DATADOG_UNINSTALL_UBUNTU_APT_KEYSERVER }}{{ datadog_uninstall_apt_key }}"
+    state: absent
+
+- name: Uninstall apt repository for datadog
+  apt_repository:
+    repo: 'deb http://apt.datadoghq.com/ stable main'
+    state: absent
+
+
+- name: Uninstall datadog agent
+  apt:
+    name: "datadog-agent"
+    state: absent
diff --git a/playbooks/roles/datadog/defaults/main.yml b/playbooks/roles/datadog/defaults/main.yml
index dd6fa244fb5..a557191cfc9 100644
--- a/playbooks/roles/datadog/defaults/main.yml
+++ b/playbooks/roles/datadog/defaults/main.yml
@@ -1,15 +1,5 @@
 ---
-datadog_api_key: "PUT_YOUR_API_KEY_HERE"
-
-datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
-
-datadog_debian_pkgs:
-  - apparmor-utils
-  - build-essential
-  - curl
-  - g++
-  - gcc
-  - ipython
-  - pkg-config
-  - rsyslog
+datadog_api_key: "{{ datadog_api_key }}"
+datadog_config: "{{ datadog_config }}"
+datadog_checks: "{{ datadog_checks }}"
diff --git a/playbooks/roles/datadog/files/etc/yum.repo.d/datadog.repo b/playbooks/roles/datadog/files/etc/yum.repo.d/datadog.repo
deleted file mode 100644
index
ee60e140827..00000000000
--- a/playbooks/roles/datadog/files/etc/yum.repo.d/datadog.repo
+++ /dev/null
@@ -1,5 +0,0 @@
-[datadog]
-name = Datadog, Inc.
-baseurl = http://yum.datadoghq.com/rpm/
-enabled=1
-gpgcheck=0
\ No newline at end of file
diff --git a/playbooks/roles/datadog/handlers/main.yml b/playbooks/roles/datadog/handlers/main.yml
deleted file mode 100644
index 82b83c4cd5a..00000000000
--- a/playbooks/roles/datadog/handlers/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-
-- name: restart the datadog service
-  service: name=datadog-agent state=restarted
diff --git a/playbooks/roles/datadog/tasks/main.yml b/playbooks/roles/datadog/tasks/main.yml
index e149397d6a1..d55f5c04277 100644
--- a/playbooks/roles/datadog/tasks/main.yml
+++ b/playbooks/roles/datadog/tasks/main.yml
@@ -1,5 +1,4 @@
 ---
-
 #
 # datadog
 #
@@ -15,43 +14,11 @@
 #   - datadog
 #
-- name: install debian needed pkgs
-  apt: pkg={{ item }}
-  with_items: datadog_debian_pkgs
-  tags:
-    - datadog
-
-- name: add apt key
-  apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
-  tags:
-    - datadog
-
-- name: install apt repository
-  apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
-  tags:
-    - datadog
-
-- name: install datadog agent
-  apt: pkg="datadog-agent"
-  tags:
-    - datadog
-
-- name: bootstrap config
-  shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
-  tags:
-    - datadog
-
-- name: update api-key
-  lineinfile: >
-    dest="/etc/dd-agent/datadog.conf"
-    regexp="^api_key:.*"
-    line="api_key:{{ datadog_api_key }}"
-  notify:
-    - restart the datadog service
-  tags:
-    - datadog
+- name: Install Datadog role from Ansible Galaxy
+  ansible.builtin.command: ansible-galaxy install datadog.datadog
+  delegate_to: localhost
+  become: false
-- name: ensure started and enabled
-  service: name=datadog-agent state=started enabled=yes
-  tags:
-    - datadog
+- name: Install datadog Agent
+  include_role:
+    name: datadog.datadog
diff --git a/playbooks/roles/dbt_docs_nginx/README.md b/playbooks/roles/dbt_docs_nginx/README.md
new file mode 100644
index 00000000000..38d2be6d57e
--- /dev/null
+++ b/playbooks/roles/dbt_docs_nginx/README.md
@@ -0,0 +1,13 @@
+### Configurations for provisioning the dbt docs server
+
+### Main role:
+This is the main role that defines all the tasks necessary for provisioning the dbtdocs server. It is used by the `deploy_nginx_for_dbt_docs.yml` playbook.
+
+### Dependencies:
+This role uses the `aws`, `aws_cloudwatch_agent` and `nginx` roles as dependencies. Therefore, it uses all the default values of those roles.
+
+### Variables:
+The following variables are required by this role:
+- `hostname_variable`: This is the string hostname value that comes before `.edx.org`. For instance, to provision the server for the full hostname `hello.world.edx.org`, set `hostname_variable` to `hello.world`.
+- `s3_bucket`: This is the name of the S3 bucket where the compiled html files are stored.
+
diff --git a/playbooks/roles/dbt_docs_nginx/defaults/main.yml b/playbooks/roles/dbt_docs_nginx/defaults/main.yml
new file mode 100644
index 00000000000..56770b09eec
--- /dev/null
+++ b/playbooks/roles/dbt_docs_nginx/defaults/main.yml
@@ -0,0 +1,2 @@
+# Populate the cloudwatch_procstat_patterns with patterns that you want to pass to the procstat config.
+cloudwatch_procstat_patterns: ['nginx', 'cloudwatch-agent']
diff --git a/playbooks/roles/dbt_docs_nginx/meta/main.yml b/playbooks/roles/dbt_docs_nginx/meta/main.yml
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/playbooks/roles/dbt_docs_nginx/tasks/main.yml b/playbooks/roles/dbt_docs_nginx/tasks/main.yml
new file mode 100644
index 00000000000..82952e46f6b
--- /dev/null
+++ b/playbooks/roles/dbt_docs_nginx/tasks/main.yml
@@ -0,0 +1,89 @@
+---
+
+- name: Update the cache and update all packages
+  apt:
+    name: "*"
+    state: latest
+    update_cache: yes
+  tags:
+    - install
+    - install:base
+
+- name: Uninstall AWS CLI from the package manager to avoid conflicts with the aws role
+  apt:
+    name: awscli
+    state: absent
+  tags:
+    - install
+    - install:aws
+
+- name: Copy dbt-docs html files from S3 to the local machine
+  shell: 'aws s3 sync s3://edx-dbt-docs/ /usr/share/nginx/html/ --delete --include "*"'
+  become: yes
+  become_method: sudo
+  tags:
+    - install
+    - install:docs
+
+- name: Make a directory to store cron scripts
+  file:
+    path: /home/server-config/
+    state: directory
+    mode: 0755
+  become: yes
+  become_method: sudo
+  tags:
+    - install
+    - install:base
+
+- name: Copy the nginx configuration file
+  template:
+    src: "nginx/nginx.conf.j2"
+    dest: /etc/nginx/nginx.conf
+  become: yes
+  become_method: sudo
+  tags:
+    - install
+    - install:nginx
+
+- name: Make the dbt-files-sync.sh cron script
+  template:
+    src: "nginx/dbt_files_sync.sh.j2"
+    dest: /home/server-config/dbt_files_sync.sh
+    mode: "a+x"
+  become: yes
+  become_method: sudo
+  tags:
+    - install
+    - install:cronjob
+
+- name: Set up cron job to update the html files from S3
+  cron:
+    name: Run the /home/server-config/dbt_files_sync.sh script
+    minute: 30
+    hour: "*/6"
+    job: /home/server-config/dbt_files_sync.sh
+  tags:
+    - install
+    - install:cronjob
+
+- name: Restart nginx and enable it on reboot
+  service:
+    name: nginx
+    state: restarted
+    enabled: yes
+  tags:
+    - install
+    - install:nginx
+
+# Add the jenkins user's ssh public key to the running user's authorized keys
+# This is needed so that this jenkins instance can be used to update system users
+- name: Add the jenkins user's ssh public key to the running user's authorized keys
+  lineinfile:
+    path: /home/{{ ansible_ssh_user }}/.ssh/authorized_keys
+    create: yes
+    line: "{{ lookup('file', jenkins_ssh_public_keyfile) }}"
+  when: jenkins_ssh_public_keyfile is defined and jenkins_ssh_public_keyfile
+  tags:
+    - ssh
+    - ssh:keys
diff --git a/playbooks/roles/dbt_docs_nginx/templates/nginx/dbt_files_sync.sh.j2 b/playbooks/roles/dbt_docs_nginx/templates/nginx/dbt_files_sync.sh.j2
new file mode 100644
index 00000000000..93374c4e13f
--- /dev/null
+++ b/playbooks/roles/dbt_docs_nginx/templates/nginx/dbt_files_sync.sh.j2
@@ -0,0 +1,2 @@
+#!/bin/bash
+sudo aws s3 sync s3://{{ s3_bucket }}/ /usr/share/nginx/html/ --delete --include "*"
\ No newline at end of file
diff --git a/playbooks/roles/dbt_docs_nginx/templates/nginx/nginx.conf.j2 b/playbooks/roles/dbt_docs_nginx/templates/nginx/nginx.conf.j2
new file mode 100644
index 00000000000..f69111a859e
--- /dev/null
+++ b/playbooks/roles/dbt_docs_nginx/templates/nginx/nginx.conf.j2
@@ -0,0 +1,64 @@
+user www-data;
+worker_processes auto;
+pid /run/nginx.pid;
+include /etc/nginx/modules-enabled/*.conf;
+
+events {
+    worker_connections 768;
+    # multi_accept on;
+}
+
+http {
+
+    ##
+    # Basic Settings
+    ##
+
+    sendfile on;
+    tcp_nopush on;
+    tcp_nodelay on;
+    keepalive_timeout 65;
+    types_hash_max_size 2048;
+    # server_tokens
off; + + # server_names_hash_bucket_size 64; + # server_name_in_redirect off; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ## + # SSL Settings + ## + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE + ssl_prefer_server_ciphers on; + + ## + # Logging Settings + ## + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + ## + # Gzip Settings + ## + + gzip on; + + + ## + # Virtual Host Configs + ## + + include /etc/nginx/conf.d/*.conf; + include /etc/nginx/sites-enabled/*; + server { + server_name {{ hostname_variable }}.edx.org; + if ($http_x_forwarded_proto = "http") { + return 301 https://$host$request_uri; + } + root /usr/share/nginx/html; + } +} diff --git a/playbooks/roles/demo/defaults/main.yml b/playbooks/roles/demo/defaults/main.yml index 7065619b26d..7dd36b86e72 100644 --- a/playbooks/roles/demo/defaults/main.yml +++ b/playbooks/roles/demo/defaults/main.yml @@ -2,32 +2,50 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Defaults for role demo # +DEMO_CREATE_STAFF_USER: true + demo_app_dir: "{{ COMMON_APP_DIR }}/demo" -demo_code_dir: "{{ demo_app_dir }}/edx-demo-course" +demo_code_dir: "{{ demo_app_dir }}/edx-demo-course/" +demo_course_dir: "{{ demo_code_dir }}/demo-course/course" demo_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-demo-course.git" -demo_course_id: 'edX/Open_DemoX/edx_demo_course' -demo_version: "master" +demo_course_id: 'course-v1:OpenedX+DemoX+DemoCourse' +DEMO_VERSION: "master" +demo_hashed_password: 'pbkdf2_sha256$20000$TjE34FJjc3vv$0B7GUmH8RwrOc/BvMoxjb5j8EgnWTt3sxorDANeF7Qw=' # edx demo_test_users: - email: 'honor@example.com' - mode: honor - password: edx + username: honor + hashed_password: "{{ demo_hashed_password }}" + is_staff: false + is_superuser: false - email: 'audit@example.com' - mode: audit - password: edx + username: audit + hashed_password: "{{ demo_hashed_password }}" + is_staff: false + is_superuser: false - email: 'verified@example.com' - mode: verified - password: edx - + username: verified + hashed_password: "{{ demo_hashed_password }}" + is_staff: false + is_superuser: false +demo_staff_user: + email: 'staff@example.com' + username: staff + hashed_password: "{{ demo_hashed_password }}" + is_staff: true + is_superuser: false +SANDBOX_EDXAPP_USERS: [] demo_edxapp_user: 'edxapp' -demo_edxapp_venv_bin: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/venvs/{{demo_edxapp_user}}/bin' -demo_edxapp_course_data_dir: '{{COMMON_DATA_DIR}}/{{demo_edxapp_user}}/data' -demo_edxapp_code_dir: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/edx-platform' +demo_edxapp_settings: '{{ COMMON_EDXAPP_SETTINGS }}' +demo_edxapp_venv_bin: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/venvs/{{demo_edxapp_user}}/bin' +demo_edxapp_course_data_dir: '{{ COMMON_DATA_DIR }}/{{ demo_edxapp_user }}/data' +demo_edxapp_code_dir: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/edx-platform' +demo_edxapp_env: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/edxapp_env' diff --git 
a/playbooks/roles/demo/tasks/deploy.yml b/playbooks/roles/demo/tasks/deploy.yml index f76956b94c1..67d7d20d576 100644 --- a/playbooks/roles/demo/tasks/deploy.yml +++ b/playbooks/roles/demo/tasks/deploy.yml @@ -1,42 +1,48 @@ --- - name: check out the demo course - git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }} - sudo_user: "{{ demo_edxapp_user }}" + git: + dest: "{{ demo_code_dir }}" + repo: "{{ demo_repo }}" + version: "{{ DEMO_VERSION }}" + accept_hostkey: yes + become_user: "{{ demo_edxapp_user }}" register: demo_checkout - name: import demo course - shell: > - {{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }} - chdir={{ demo_edxapp_code_dir }} - sudo_user: "{{ common_web_user }}" + shell: ". {{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings={{ demo_edxapp_settings }} import {{ demo_edxapp_course_data_dir }} {{ demo_course_dir }}" + args: + chdir: "{{ demo_edxapp_code_dir }}" + become_user: "{{ common_web_user }}" when: demo_checkout.changed -- name: create some test users and enroll them in the course - shell: > - {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }} - chdir={{ demo_edxapp_code_dir }} - sudo_user: "{{ common_web_user }}" - with_items: demo_test_users - when: demo_checkout.changed +- name: build staff and test user list + set_fact: + demo_test_and_staff_users: "{{ demo_test_users + [demo_staff_user] }}" + when: DEMO_CREATE_STAFF_USER -- name: create staff user - shell: > - {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }} - chdir={{ demo_edxapp_code_dir }} - sudo_user: "{{ common_web_user }}" - when: demo_checkout.changed +- name: build staff and test user list + set_fact: + demo_test_and_staff_users: "{{ demo_test_users }}" + when: not DEMO_CREATE_STAFF_USER + +- name: build staff, admin, and test user list + set_fact: + demo_test_admin_and_staff_users: "{{ demo_test_and_staff_users + SANDBOX_EDXAPP_USERS }}" -- name: add test users to the certificate whitelist - shell: > - {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }} - chdir={{ demo_edxapp_code_dir }} - with_items: demo_test_users +- name: create some test users + shell: ". {{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}{% if item.is_staff %} --staff{% endif %}{% if item.is_superuser %} --superuser{% endif %}" + args: + chdir: "{{ demo_edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + with_items: "{{ demo_test_admin_and_staff_users }}" when: demo_checkout.changed -- name: seed the forums for the demo course - shell: > - {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }} - chdir={{ demo_edxapp_code_dir }} - with_items: demo_test_users +- name: enroll test users in the demo course + shell: ". 
{{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}" + args: + chdir: "{{ demo_edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + with_items: + - "{{ demo_test_and_staff_users }}" when: demo_checkout.changed diff --git a/playbooks/roles/demo/tasks/main.yml b/playbooks/roles/demo/tasks/main.yml index 29287e5bb2b..992099d15f9 100644 --- a/playbooks/roles/demo/tasks/main.yml +++ b/playbooks/roles/demo/tasks/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -14,7 +14,7 @@ # Overview: # # Imports the demo course into studio -# https://github.com/edx/edx-demo-course +# https://github.com/openedx/edx-demo-course # # Once imported this role will only re-import the course # if the edx-demo-course repo has been updated @@ -31,8 +31,10 @@ # - demo - name: create demo app and data dirs - file: > - path="{{ demo_app_dir }}" state=directory - owner="{{ demo_edxapp_user }}" group="{{ common_web_group }}" + file: + path: "{{ demo_app_dir }}" + state: directory + owner: "{{ demo_edxapp_user }}" + group: "{{ common_web_group }}" - include: deploy.yml tags=deploy diff --git a/playbooks/roles/designer/defaults/main.yml b/playbooks/roles/designer/defaults/main.yml new file mode 100644 index 00000000000..f991bed1172 --- /dev/null +++ b/playbooks/roles/designer/defaults/main.yml @@ -0,0 +1,130 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role designer +# + + +# +# vars are namespace with the module name. 
+# +designer_service_name: 'designer' + +designer_environment: + DESIGNER_CFG: '{{ COMMON_CFG_DIR }}/{{ designer_service_name }}.yml' + +designer_gunicorn_port: 8808 + +designer_debian_pkgs: [] + +DESIGNER_USE_PYTHON38: True + +DESIGNER_NGINX_PORT: '1{{ designer_gunicorn_port }}' +DESIGNER_SSL_NGINX_PORT: '4{{ designer_gunicorn_port }}' + +DESIGNER_DEFAULT_DB_NAME: 'designer' +DESIGNER_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +DESIGNER_MYSQL_USER: 'designer001' +DESIGNER_MYSQL_PASSWORD: 'password' +DESIGNER_MYSQL_CONN_MAX_AGE: 60 + +DESIGNER_MEMCACHE: [ 'memcache' ] + +DESIGNER_DJANGO_SETTINGS_MODULE: 'designer.settings.production' +DESIGNER_DOMAIN: 'localhost' +DESIGNER_URL_ROOT: 'http://{{ DESIGNER_DOMAIN }}:{{ DESIGNER_NGINX_PORT }}' +DESIGNER_LOGOUT_URL: '{{ DESIGNER_URL_ROOT }}/logout/' + +DESIGNER_LANGUAGE_CODE: 'en' +DESIGNER_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +DESIGNER_SERVICE_USER: 'designer_service_user' + +DESIGNER_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ designer_service_name }}' +DESIGNER_MEDIA_ROOT: '{{ DESIGNER_DATA_DIR }}/media' +DESIGNER_MEDIA_URL: '/media/' + +DESIGNER_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ DESIGNER_MEDIA_ROOT }}' + MEDIA_URL: '{{ DESIGNER_MEDIA_URL }}' + +# TODO: Let edx_django_service manage DESIGNER_STATIC_ROOT in phase 2. +DESIGNER_STATIC_ROOT: '{{ DESIGNER_DATA_DIR }}/staticfiles' +DESIGNER_STATIC_URL: '/static/' + +DESIGNER_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +DESIGNER_CORS_ORIGIN_ALLOW_ALL: false +DESIGNER_CORS_ORIGIN_WHITELIST_DEFAULT: + - '{{ DESIGNER_DOMAIN }}' + +DESIGNER_CORS_ORIGIN_WHITELIST_EXTRA: [] +DESIGNER_CORS_ORIGIN_WHITELIST: '{{ DESIGNER_CORS_ORIGIN_WHITELIST_DEFAULT + DESIGNER_CORS_ORIGIN_WHITELIST_EXTRA }}' + +DESIGNER_VERSION: 'master' + +DESIGNER_GUNICORN_EXTRA: '' + +DESIGNER_EXTRA_APPS: [] + +DESIGNER_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +DESIGNER_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +designer_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ DESIGNER_CERTIFICATE_LANGUAGES }}' + DESIGNER_SERVICE_USER: '{{ DESIGNER_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ DESIGNER_LANGUAGE_COOKIE_NAME }}' + CSRF_COOKIE_SECURE: "{{ DESIGNER_CSRF_COOKIE_SECURE }}" + +# See edx_django_service_automated_users for an example of what this should be +DESIGNER_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. 
for sandboxes) +DESIGNER_LMS_URL_ROOT: !!null +DESIGNER_DISCOVERY_API_URL: !!null + +DESIGNER_CSRF_COOKIE_SECURE: false + +DESIGNER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +designer_post_migrate_commands: [] + +DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'designer-sso-key' +DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'designer-sso-secret' +DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'designer-backend-service-key' +DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'designer-backend-service-secret' +DESIGNER_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +DESIGNER_GIT_IDENTITY: !!null + +designer_home: "{{ COMMON_APP_DIR }}/{{ designer_service_name }}" +designer_code_dir: "{{ designer_home }}/{{ designer_service_name }}" + +DESIGNER_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: 'portal-designer.git' + VERSION: '{{ DESIGNER_VERSION }}' + DESTINATION: "{{ designer_code_dir }}" + SSH_KEY: '{{ DESIGNER_GIT_IDENTITY }}' + +DESIGNER_SECRET_KEY: 'SET-ME-PLEASE' + +# Remote config +DESIGNER_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +DESIGNER_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +DESIGNER_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/designer/meta/main.yml b/playbooks/roles/designer/meta/main.yml new file mode 100644 index 00000000000..b0fb6fc1711 --- /dev/null +++ b/playbooks/roles/designer/meta/main.yml @@ -0,0 +1,50 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role ansible-role-django-ida + +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ DESIGNER_USE_PYTHON38 }}' + edx_django_service_use_python3: '{{ not DESIGNER_USE_PYTHON38 }}' + edx_django_service_version: '{{ DESIGNER_VERSION }}' + edx_django_service_name: '{{ designer_service_name }}' + edx_django_service_config_overrides: '{{ designer_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ designer_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ designer_gunicorn_port }}' + edx_django_service_repos: '{{ DESIGNER_REPOS }}' + edx_django_service_django_settings_module: '{{ DESIGNER_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ designer_environment }}' + edx_django_service_gunicorn_extra: '{{ DESIGNER_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ DESIGNER_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ DESIGNER_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ DESIGNER_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ DESIGNER_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ DESIGNER_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ DESIGNER_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ DESIGNER_MEMCACHE }}' + edx_django_service_default_db_host: '{{ DESIGNER_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ DESIGNER_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ DESIGNER_MYSQL_USER }}' + edx_django_service_db_password: '{{ DESIGNER_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ DESIGNER_MYSQL_CONN_MAX_AGE }}' + edx_django_service_extra_apps: '{{ DESIGNER_EXTRA_APPS }}' + 
edx_django_service_session_expire_at_browser_close: '{{ DESIGNER_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ DESIGNER_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ DESIGNER_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ designer_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ DESIGNER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ DESIGNER_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ DESIGNER_COPY_CONFIG_ENABLED }}' diff --git a/playbooks/roles/designer/tasks/main.yml b/playbooks/roles/designer/tasks/main.yml new file mode 100644 index 00000000000..21299e9c842 --- /dev/null +++ b/playbooks/roles/designer/tasks/main.yml @@ -0,0 +1,22 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role designer +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# diff --git a/playbooks/roles/devpi/defaults/main.yml b/playbooks/roles/devpi/defaults/main.yml deleted file mode 100644 index 61c2ef05105..00000000000 --- a/playbooks/roles/devpi/defaults/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -devpi_app_dir: "{{ COMMON_APP_DIR }}/devpi" -devpi_data_dir: "{{ COMMON_DATA_DIR }}/devpi" -devpi_mirror_dir: "{{ devpi_data_dir }}/data" -devpi_log_dir: "{{ COMMON_LOG_DIR }}/devpi" -devpi_venvs_dir: "{{ devpi_app_dir }}/venvs" -devpi_venv_dir: "{{ devpi_venvs_dir }}/devpi" -devpi_venv_bin: "{{ devpi_venv_dir }}/bin" -devpi_pip_pkgs: - - devpi-server - - eventlet -devpi_nginx_port: 80 -devpi_port: 4040 -devpi_user: devpi -devpi_group: devpi -devpi_server_name: 'pypy.*' - -devpi_supervisor_user: devpi.supervisor -devpi_supervisor_app_dir: "{{ devpi_app_dir }}/supervisor" -devpi_supervisor_cfg_dir: "{{ devpi_supervisor_app_dir }}/conf.d" -devpi_supervisor_data_dir: "{{ devpi_data_dir }}/supervisor" -devpi_supervisor_cfg: "{{ devpi_supervisor_app_dir }}/supervisord.conf" -devpi_supervisor_log_dir: "{{ devpi_log_dir }}/supervisor" -devpi_supervisor_venv_dir: "{{ devpi_app_dir }}/venvs/supervisor" -devpi_supervisor_venv_bin: "{{ devpi_supervisor_venv_dir }}/bin" -devpi_supervisor_ctl: "{{ devpi_supervisor_venv_bin }}/supervisorctl" diff --git a/playbooks/roles/devpi/handlers/main.yml b/playbooks/roles/devpi/handlers/main.yml deleted file mode 100644 index 09a573a23f2..00000000000 --- a/playbooks/roles/devpi/handlers/main.yml +++ /dev/null @@ -1,20 +0,0 @@ -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Defaults for role devpi -# ---- -- name: restart devpi - 
supervisorctl_local: > - state=restarted - supervisorctl_path={{ devpi_supervisor_ctl }} - config={{ devpi_supervisor_cfg }} - name=devpi-server - sudo_user: "{{ devpi_supervisor_user }}" diff --git a/playbooks/roles/devpi/meta/main.yml b/playbooks/roles/devpi/meta/main.yml deleted file mode 100644 index 16b005b9350..00000000000 --- a/playbooks/roles/devpi/meta/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -dependencies: - - role: supervisor - supervisor_app_dir: "{{ devpi_supervisor_app_dir }}" - supervisor_data_dir: "{{ devpi_supervisor_data_dir }}" - supervisor_log_dir: "{{ devpi_supervisor_log_dir }}" - supervisor_venv_dir: "{{ devpi_supervisor_venv_dir }}" - supervisor_service_user: "{{ devpi_supervisor_user }}" - supervisor_service: "supervisor.devpi" - supervisor_http_bind_port: '9002' diff --git a/playbooks/roles/devpi/tasks/main.yml b/playbooks/roles/devpi/tasks/main.yml deleted file mode 100644 index 13562007778..00000000000 --- a/playbooks/roles/devpi/tasks/main.yml +++ /dev/null @@ -1,114 +0,0 @@ -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role devpi -# -# Overview: -# Creates a pypi caching server -# -# Dependencies: -# - common -# - nginx -# - supervisor -# -# Example play: -# roles: -# - common -# - role: nginx -# nginx_sites: -# - devpi -# - role: supervisor -# supervisor_servers: -# - devpi -# - devpi - ---- -- name: create devpi user - user: > - name={{ devpi_user }} - shell=/bin/false createhome=no - notify: restart devpi - -- name: create devpi application directories - file: > - path={{ item }} - state=directory - owner={{ devpi_user }} - group={{ devpi_supervisor_user }} - with_items: - - "{{ devpi_app_dir }}" - - "{{ devpi_venv_dir }}" - notify: restart devpi - -- name: create the devpi data directory, needs write access by the service user - file: > - path={{ item }} - state=directory - owner={{ devpi_supervisor_user }} - group={{ devpi_user }} - with_items: - - "{{ devpi_data_dir }}" - - "{{ devpi_mirror_dir }}" - notify: restart devpi - -- name: install devpi pip pkgs - pip: > - name={{ item }} - state=present - virtualenv={{ devpi_venv_dir }} - sudo_user: "{{ devpi_user }}" - with_items: devpi_pip_pkgs - notify: restart devpi - -- name: writing supervisor script - template: > - src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf - owner={{ devpi_user }} group={{ devpi_user }} mode=0644 - notify: restart devpi - -- name: create a symlink for venv python, pip - file: > - src="http://github.com/%7B%7B%20devpi_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi - state=link - notify: restart devpi - with_items: - - python - - pip - -- name: create a symlink for venv supervisor - file: > - src="http://github.com/%7B%7B%20devpi_supervisor_venv_bin%20%7D%7D/supervisorctl" - dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi - state=link - -- name: create a symlink for supervisor config - file: > - src="http://github.com/%7B%7B%20devpi_supervisor_app_dir%20%7D%7D/supervisord.conf" - dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi - state=link - - # call supervisorctl update. this reloads - # the supervisorctl config and restarts - # the services if any of the configurations - # have changed. 
- # -- name: update devpi supervisor configuration - shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update" - register: supervisor_update - changed_when: supervisor_update.stdout != "" - -- name: ensure devpi is started - supervisorctl_local: > - state=started - supervisorctl_path={{ devpi_supervisor_ctl }} - config={{ devpi_supervisor_cfg }} - name=devpi-server - sudo_user: "{{ devpi_supervisor_user }}" diff --git a/playbooks/roles/devpi/templates/devpi.conf.j2 b/playbooks/roles/devpi/templates/devpi.conf.j2 deleted file mode 100644 index a5b715493f3..00000000000 --- a/playbooks/roles/devpi/templates/devpi.conf.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[program:devpi-server] -command={{ devpi_venv_bin }}/devpi-server --port {{ devpi_port }} --serverdir {{ devpi_mirror_dir }} -user={{ devpi_supervisor_user }} -priority=999 -stdout_logfile={{ devpi_supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ devpi_supervisor_log_dir }}/%(program_name)-stderr.log -autostart=True -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/discern/defaults/main.yml b/playbooks/roles/discern/defaults/main.yml deleted file mode 100644 index cf423c6d93d..00000000000 --- a/playbooks/roles/discern/defaults/main.yml +++ /dev/null @@ -1,115 +0,0 @@ -DISCERN_NGINX_PORT: 18070 -DISCERN_MEMCACHE: [ 'localhost:11211' ] -DISCERN_AWS_ACCESS_KEY_ID: "" -DISCERN_AWS_SECRET_ACCESS_KEY: "" -DISCERN_BROKER_URL: "" -DISCERN_RESULT_BACKEND: "" -DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID: "" -DISCERN_MYSQL_DB_NAME: 'discern' -DISCERN_MYSQL_USER: 'discern001' -DISCERN_MYSQL_PASSWORD: 'password' -DISCERN_MYSQL_HOST: 'localhost' -DISCERN_MYSQL_PORT: '3306' -DISCERN_LANG: "en_US.UTF-8" - - -discern_app_dir: "{{ COMMON_APP_DIR }}/discern" -discern_code_dir: "{{ discern_app_dir }}/discern" -discern_data_dir: "{{ COMMON_DATA_DIR }}/discern" -discern_venvs_dir: "{{ discern_app_dir }}/venvs" -discern_venv_dir: "{{ discern_venvs_dir }}/discern" -discern_venv_bin: "{{ discern_venv_dir }}/bin" -discern_pre_requirements_file: "{{ discern_code_dir }}/pre-requirements.txt" -discern_post_requirements_file: "{{ discern_code_dir }}/requirements.txt" -discern_user: "discern" - -discern_ease_venv_dir: "{{ discern_venv_dir }}" -discern_ease_code_dir: "{{ discern_app_dir }}/ease" -discern_ease_source_repo: https://github.com/edx/ease.git -discern_ease_version: 'HEAD' -discern_ease_pre_requirements_file: "{{ discern_ease_code_dir }}/pre-requirements.txt" -discern_ease_post_requirements_file: "{{ discern_ease_code_dir }}/requirements.txt" - -discern_nltk_data_dir: "{{ discern_data_dir}}/nltk_data" -discern_nltk_download_url: http://edx-static.s3.amazonaws.com/nltk/nltk-data-20131113.tar.gz -discern_nltk_tmp_file: "{{ discern_data_dir }}/nltk.tmp.tar.tz" - -discern_source_repo: https://github.com/edx/discern.git -discern_settings: discern.aws -discern_version: master -discern_gunicorn_port: 8070 -discern_gunicorn_host: 127.0.0.1 - -discern_worker_mult: 2 -discern_env_config: - ACCOUNT_EMAIL_VERIFICATION: "mandatory" - AWS_SES_REGION_NAME: "us-east-1" - DEFAULT_FROM_EMAIL: "registration@example.com" - DNS_HOSTNAME: "" - ELB_HOSTNAME: "" - EMAIL_BACKEND: "django.core.mail.backends.smtp.EmailBackend" - S3_BUCKETNAME: "" - USE_S3_TO_STORE_MODElS: false - - -discern_auth_config: - AWS_ACCESS_KEY_ID: $DISCERN_AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY: $DISCERN_SECRET_ACCESS_KEY - BROKER_URL: $DISCERN_BROKER_URL - CACHES: - default: - BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' - LOCATION: $DISCERN_MEMCACHE - 
CELERY_RESULT_BACKEND: $DISCERN_RESULT_BACKEND - DATABASES: - default: - ENGINE: django.db.backends.mysql - HOST: $DISCERN_MYSQL_HOST - NAME: $DISCERN_MYSQL_DB_NAME - PASSWORD: $DISCERN_MYSQL_PASSWORD - PORT: $DISCERN_MYSQL_PORT - USER: $DISCERN_MYSQL_USER - GOOGLE_ANALYTICS_PROPERTY_ID: $DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID - - -discern_debian_pkgs: - - policykit-1 - - python-virtualenv - - gcc - - g++ - - build-essential - - python-dev - - gfortran - - libfreetype6-dev - - libpng12-dev - - libxml2-dev - - libxslt1-dev - - libreadline6 - - libreadline6-dev - - redis-server - - python-pip - - ipython - - nginx - - libmysqlclient-dev - - libblas3gf - - libblas-dev - - liblapack3gf - - liblapack-dev - - libatlas-base-dev - - curl - - yui-compressor - -discern_ease_debian_pkgs: - - python-pip - - gcc - - g++ - - gfortran - - libblas3gf - - libblas-dev - - liblapack3gf - - liblapack-dev - - libatlas-base-dev - - libxml2-dev - - libxslt1-dev - - aspell - - python diff --git a/playbooks/roles/discern/files/git_ssh.sh b/playbooks/roles/discern/files/git_ssh.sh deleted file mode 100644 index 9e6cc4f2b9c..00000000000 --- a/playbooks/roles/discern/files/git_ssh.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no "$@" diff --git a/playbooks/roles/discern/files/sudoers-discern b/playbooks/roles/discern/files/sudoers-discern deleted file mode 100644 index 3aed4435c47..00000000000 --- a/playbooks/roles/discern/files/sudoers-discern +++ /dev/null @@ -1 +0,0 @@ -Defaults env_keep+=SSH_AUTH_SOCK diff --git a/playbooks/roles/discern/handlers/main.yml b/playbooks/roles/discern/handlers/main.yml deleted file mode 100644 index f3d39dc9a5a..00000000000 --- a/playbooks/roles/discern/handlers/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: restart discern - supervisorctl_local: > - name=discern - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - when: discern_installed is defined - with_items: - - discern - - discern_celery diff --git a/playbooks/roles/discern/meta/main.yml b/playbooks/roles/discern/meta/main.yml deleted file mode 100644 index 107f1e98c29..00000000000 --- a/playbooks/roles/discern/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - supervisor diff --git a/playbooks/roles/discern/tasks/deploy.yml b/playbooks/roles/discern/tasks/deploy.yml deleted file mode 100644 index 827808ac52e..00000000000 --- a/playbooks/roles/discern/tasks/deploy.yml +++ /dev/null @@ -1,127 +0,0 @@ ---- - -- name: create supervisor scripts - discern, discern_celery - template: > - src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf - owner={{ supervisor_user }} mode=0644 - sudo_user: "{{ supervisor_user }}" - with_items: ['discern', 'discern_celery'] - -#Upload config files for django (auth and env) -- name: create discern application config env.json file - template: src=env.json.j2 dest={{ discern_app_dir }}/env.json - sudo_user: "{{ discern_user }}" - notify: - - restart discern - -- name: create discern auth file auth.json - template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json - sudo_user: "{{ discern_user }}" - notify: - - restart discern - -- name: git checkout discern repo into discern_code_dir - git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }} - sudo_user: "{{ discern_user }}" - notify: - - restart discern - -- name: git checkout ease repo into discern_ease_code_dir - git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} 
version={{ discern_ease_version }} - sudo_user: "{{ discern_user }}" - notify: - - restart discern - -#Numpy has to be a pre-requirement in order for scipy to build -- name : install python pre-requirements for discern and ease - pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present - sudo_user: "{{ discern_user }}" - notify: - - restart discern - with_items: - - "{{ discern_pre_requirements_file }}" - - "{{ discern_ease_pre_requirements_file }}" - -- name : install python requirements for discern and ease - pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present - sudo_user: "{{ discern_user }}" - notify: - - restart discern - with_items: - - "{{ discern_post_requirements_file }}" - - "{{ discern_ease_post_requirements_file }}" - -- name: install ease python package - shell: > - {{ discern_venv_dir }}/bin/activate; cd {{ discern_ease_code_dir }}; python setup.py install - notify: - - restart discern - -- name: download and install nltk - shell: | - set -e - curl -o {{ discern_nltk_tmp_file }} {{ discern_nltk_download_url }} - tar zxf {{ discern_nltk_tmp_file }} - rm -f {{ discern_nltk_tmp_file }} - touch {{ discern_nltk_download_url|basename }}-installed - creates={{ discern_data_dir }}/{{ discern_nltk_download_url|basename }}-installed - chdir={{ discern_data_dir }} - sudo_user: "{{ discern_user }}" - notify: - - restart discern - - -#Run this instead of using the ansible module because the ansible module only support syncdb of these three, and does not -#support virtualenvs as of this comment -- name: django syncdb migrate and collectstatic for discern - shell: > - {{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}} - chdir={{ discern_code_dir }} - sudo_user: "{{ discern_user }}" - notify: - - restart discern - with_items: - - syncdb - - migrate - - collectstatic -#Have this separate from the other three because it doesn't take the noinput flag -- name: django update_index for discern - shell: > - {{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}} - chdir={{ discern_code_dir }} - sudo_user: "{{ discern_user }}" - notify: - - restart discern - - - # call supervisorctl update. this reloads - # the supervisorctl config and restarts - # the services if any of the configurations - # have changed. 
- # -- name: update supervisor configuration - shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" - register: supervisor_update - sudo_user: "{{ supervisor_service_user }}" - changed_when: supervisor_update.stdout != "" - -- name: ensure discern, discern_celery has started - supervisorctl_local: > - name={{ item }} - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - with_items: - - discern - - discern_celery - -- name: create a symlink for venv python - file: > - src="http://github.com/%7B%7B%20discern_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.discern - state=link - with_items: - - python - - pip - -- set_fact: discern_installed=true diff --git a/playbooks/roles/discern/tasks/main.yml b/playbooks/roles/discern/tasks/main.yml deleted file mode 100644 index 16f5d9fe655..00000000000 --- a/playbooks/roles/discern/tasks/main.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: create application user - user: > - name="{{ discern_user }}" - home="{{ discern_app_dir }}" - createhome=no - shell=/bin/false - notify: - - restart discern - -- name: create discern app dirs owned by discern - file: > - path="{{ item }}" - state=directory - owner="{{ discern_user }}" - group="{{ common_web_group }}" - notify: - - restart discern - with_items: - - "{{ discern_app_dir }}" - - "{{ discern_venvs_dir }}" - -- name: create discern data dir, owned by {{ common_web_user }} - file: > - path="{{ discern_data_dir }}" state=directory - owner="{{ common_web_user }}" group="{{ discern_user }}" - mode=0775 - notify: - - restart discern - -- name: install debian packages that discern needs - apt: pkg={{ item }} state=present - notify: - - restart discern - with_items: discern_debian_pkgs - -- name: install debian packages for ease that discern needs - apt: pkg={{ item }} state=present - notify: - - restart discern - with_items: discern_ease_debian_pkgs - -- name: copy sudoers file for discern - copy: > - src=sudoers-discern dest=/etc/sudoers.d/discern - mode=0440 validate='visudo -cf %s' owner=root group=root - notify: - - restart discern - -#Needed if using redis to prevent memory issues -- name: change memory commit settings -- needed for redis - command: sysctl vm.overcommit_memory=1 - notify: - - restart discern - -- include: deploy.yml tags=deploy diff --git a/playbooks/roles/discern/templates/auth.json.j2 b/playbooks/roles/discern/templates/auth.json.j2 deleted file mode 100644 index 5a14a9cefcb..00000000000 --- a/playbooks/roles/discern/templates/auth.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ discern_auth_config | to_nice_json }} diff --git a/playbooks/roles/discern/templates/celery.conf.j2 b/playbooks/roles/discern/templates/celery.conf.j2 deleted file mode 100644 index fb9d6312a74..00000000000 --- a/playbooks/roles/discern/templates/celery.conf.j2 +++ /dev/null @@ -1,17 +0,0 @@ -#Celery task for ml api - -description "Celery ML api" -author "Vik Paruchuri " - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn -respawn limit 3 30 - -env DJANGO_SETTINGS_MODULE={{discern_settings}} - -chdir {{ discern_code_dir }} -setuid {{discern_user}} - -exec {{ discern_venv_dir }}/bin/python {{ discern_code_dir }}/manage.py celeryd --loglevel=info --settings={{ discern_settings }} --pythonpath={{ discern_code_dir }} -B --autoscale={{ ansible_processor_cores * 2 }},1 diff --git a/playbooks/roles/discern/templates/discern.conf.j2 b/playbooks/roles/discern/templates/discern.conf.j2 deleted file mode 100644 index 
8b64ce24d57..00000000000 --- a/playbooks/roles/discern/templates/discern.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:discern] - -{% if ansible_processor|length > 0 %} -command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ ansible_processor|length * discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi -{% else %} -command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi -{% endif %} -user={{ common_web_user }} -directory={{ discern_code_dir }} -environment=LANG={{ DISCERN_LANG }},DJANGO_SETTINGS_MODULE={{ discern_settings }},SERVICE_VARIANT=discern -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/discern/templates/discern_celery.conf.j2 b/playbooks/roles/discern/templates/discern_celery.conf.j2 deleted file mode 100644 index 643e2d6d131..00000000000 --- a/playbooks/roles/discern/templates/discern_celery.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:discern_celery] - -command={{ discern_venv_bin }}/python {{ discern_code_dir }}/manage.py celeryd --loglevel=info --settings=discern.aws --pythonpath={{ discern_code_dir }} -B --autoscale=4,1 --schedule={{ discern_data_dir }}/celerybeat-schedule - -user={{ common_web_user }} -directory={{ discern_code_dir }} - -environment=DJANGO_SETTINGS_MODULE=discern.aws,SERVICE_VARIANT=discern - -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log - -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/discern/templates/env.json.j2 b/playbooks/roles/discern/templates/env.json.j2 deleted file mode 100644 index 2b769c2d139..00000000000 --- a/playbooks/roles/discern/templates/env.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ discern_env_config | to_nice_json }} diff --git a/playbooks/roles/discovery/defaults/main.yml b/playbooks/roles/discovery/defaults/main.yml new file mode 100644 index 00000000000..0660c208325 --- /dev/null +++ b/playbooks/roles/discovery/defaults/main.yml @@ -0,0 +1,228 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role discovery +# + +DISCOVERY_GIT_IDENTITY: !!null + +# +# vars are namespaced with the module name. 
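+# For example (an illustration of the naming convention only, not new settings): operator-facing +# values use the upper-case DISCOVERY_ prefix (e.g. DISCOVERY_VERSION), while role-internal vars +# use the lower-case discovery_ prefix (e.g. discovery_code_dir).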
+# +discovery_service_name: "discovery" +discovery_gunicorn_port: 8381 + +DISCOVERY_DEFAULT_DB_NAME: 'discovery' +DISCOVERY_MYSQL: 'localhost' +# MySQL usernames are limited to 16 characters +DISCOVERY_MYSQL_USER: 'discov001' +DISCOVERY_MYSQL_PASSWORD: 'password' +DISCOVERY_MYSQL_REPLICA_HOST: 'localhost' +DISCOVERY_MYSQL_CONN_MAX_AGE: 60 + +discovery_environment: + DISCOVERY_CFG: "{{ COMMON_CFG_DIR }}/{{ discovery_service_name }}.yml" + +discovery_user: "{{ discovery_service_name }}" +discovery_home: "{{ COMMON_APP_DIR }}/{{ discovery_service_name }}" +discovery_code_dir: "{{ discovery_home }}/{{ discovery_service_name }}" + +DISCOVERY_NODE_VERSION: '16.14.0' +DISCOVERY_NPM_VERSION: '8.5.5' +DISCOVERY_USE_PYTHON38: True + +# +# OS packages +# + +discovery_debian_pkgs: + - libxml2-dev + - libxslt1-dev + - libjpeg-dev + - libcairo2-dev + + +# The nginx ports are derived by prefixing the gunicorn port, e.g. 8381 becomes 18381 (HTTP) and 48381 (SSL). +DISCOVERY_NGINX_PORT: "1{{ discovery_gunicorn_port }}" +DISCOVERY_SSL_NGINX_PORT: "4{{ discovery_gunicorn_port }}" + +# Using SSL? See https://www.elastic.co/guide/en/shield/current/ssl-tls.html. +# Using AWS? Use the AWS-provided host (e.g. https://search-test-abc123.us-east-1.es.amazonaws.com/). +# Assuming the port corresponds to that of the protocol (e.g. http=80, https=443), there is no need to specify a port. +DISCOVERY_ELASTICSEARCH_URL: 'http://127.0.0.1:9200/' +DISCOVERY_ELASTICSEARCH_INDEX_NAME: 'catalog' + +DISCOVERY_OPENSEARCH_HOST: 'localhost' +DISCOVERY_OPENSEARCH_PORT: 9202 +DISCOVERY_OPENSEARCH_USE_SSL: false + +DISCOVERY_MEMCACHE: [ 'memcache' ] + +DISCOVERY_VERSION: "master" +DISCOVERY_DJANGO_SETTINGS_MODULE: "course_discovery.settings.production" +DISCOVERY_URL_ROOT: 'http://discovery:{{ DISCOVERY_NGINX_PORT }}' +DISCOVERY_LOGOUT_URL: '{{ DISCOVERY_URL_ROOT }}/logout/' + +DISCOVERY_SECRET_KEY: 'Your secret key here' + +DISCOVERY_LANGUAGE_CODE: 'en' + +## Configuration for the django-parler package. 
For more information visit +## https://django-parler.readthedocs.io/en/latest/configuration.html#parler-languages +DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE: '{{ DISCOVERY_LANGUAGE_CODE }}' +DISCOVERY_PARLER_LANGUAGES: + 1: + - code: 'en' + default: + fallbacks: + - '{{ DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE }}' + hide_untranslated: 'False' + +DISCOVERY_DEFAULT_PARTNER_ID: 1 +DISCOVERY_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +# Used to automatically configure OAuth2 Client +DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'discovery-sso-key' +DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'discovery-sso-secret' +DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'discovery-backend-service-key' +DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'discovery-backend-service-secret' + +DISCOVERY_PLATFORM_NAME: 'Your Platform Name Here' + +DISCOVERY_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ discovery_service_name }}' +DISCOVERY_MEDIA_ROOT: '{{ DISCOVERY_DATA_DIR }}/media' +DISCOVERY_MEDIA_URL: '/media/' + +DISCOVERY_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ DISCOVERY_MEDIA_ROOT }}' + MEDIA_URL: '{{ DISCOVERY_MEDIA_URL }}' + +DISCOVERY_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +# You can set different email backends with Django: +# https://docs.djangoproject.com/en/1.9/topics/email/#email-backends +DISCOVERY_EMAIL_BACKEND: 'django_ses.SESBackend' + +# For the django-ses email backend, the following settings are required +DISCOVERY_AWS_SES_REGION_NAME: 'us-east-1' +DISCOVERY_AWS_SES_REGION_ENDPOINT: 'email.us-east-1.amazonaws.com' + +# For the default SMTP email backend, the following settings are required +DISCOVERY_EMAIL_HOST: 'localhost' +DISCOVERY_EMAIL_PORT: 25 +DISCOVERY_EMAIL_USE_TLS: False +DISCOVERY_EMAIL_HOST_USER: '' +DISCOVERY_EMAIL_HOST_PASSWORD: '' + +DISCOVERY_ENABLE_PUBLISHER: false +DISCOVERY_PUBLISHER_FROM_EMAIL: !!null + +DISCOVERY_OPENEXCHANGERATES_API_KEY: '' + +DISCOVERY_GUNICORN_EXTRA: '' + +DISCOVERY_GUNICORN_WORKERS: 2 + +DISCOVERY_EXTRA_APPS: [] + +DISCOVERY_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + +DISCOVERY_CELERY_BROKER_URL: "redis://:@127.0.0.1:6379/" + +DISCOVERY_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: 'course-discovery.git' + VERSION: "{{ DISCOVERY_VERSION }}" + DESTINATION: "{{ discovery_code_dir }}" + SSH_KEY: "{{ DISCOVERY_GIT_IDENTITY }}" + +# List of additional python packages that should be installed into the +# discovery virtual environment. +# `name` (required), `version` (optional), and `extra_args` (optional) +# are supported and correspond to the options of ansible's pip module. 
+# Example: +# DISCOVERY_EXTRA_REQUIREMENTS: +# - name: mypackage +# version: 1.0.1 +# - name: git+https://git.myproject.org/MyProject#egg=MyProject +DISCOVERY_EXTRA_REQUIREMENTS: [] + +discovery_service_config_overrides: + ELASTICSEARCH_CLUSTER_URL: '{{ DISCOVERY_ELASTICSEARCH_URL }}' + ELASTICSEARCH_INDEX_NAME: '{{ DISCOVERY_ELASTICSEARCH_INDEX_NAME }}' + + DISCOVERY_OPENSEARCH_HOST: '{{ DISCOVERY_OPENSEARCH_HOST }}' + DISCOVERY_OPENSEARCH_PORT: '{{ DISCOVERY_OPENSEARCH_PORT }}' + DISCOVERY_OPENSEARCH_USE_SSL: '{{ DISCOVERY_OPENSEARCH_USE_SSL }}' + + PLATFORM_NAME: '{{ DISCOVERY_PLATFORM_NAME }}' + + DEFAULT_PARTNER_ID: '{{ DISCOVERY_DEFAULT_PARTNER_ID }}' + + EMAIL_BACKEND: '{{ DISCOVERY_EMAIL_BACKEND }}' + + # Settings for django-ses email backend + AWS_SES_REGION_NAME: '{{ DISCOVERY_AWS_SES_REGION_NAME }}' + AWS_SES_REGION_ENDPOINT: '{{ DISCOVERY_AWS_SES_REGION_ENDPOINT }}' + + # Settings for default django SMTP email backend + EMAIL_HOST: '{{ DISCOVERY_EMAIL_HOST }}' + EMAIL_PORT: '{{ DISCOVERY_EMAIL_PORT }}' + EMAIL_USE_TLS: '{{ DISCOVERY_EMAIL_USE_TLS }}' + EMAIL_HOST_USER: '{{ DISCOVERY_EMAIL_HOST_USER }}' + EMAIL_HOST_PASSWORD: '{{ DISCOVERY_EMAIL_HOST_PASSWORD }}' + + ENABLE_PUBLISHER: '{{ DISCOVERY_ENABLE_PUBLISHER }}' + PUBLISHER_FROM_EMAIL: '{{ DISCOVERY_PUBLISHER_FROM_EMAIL }}' + + OPENEXCHANGERATES_API_KEY: '{{ DISCOVERY_OPENEXCHANGERATES_API_KEY }}' + + LANGUAGE_CODE: '{{ DISCOVERY_LANGUAGE_CODE }}' + PARLER_DEFAULT_LANGUAGE_CODE: '{{ DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE }}' + PARLER_LANGUAGES: '{{ DISCOVERY_PARLER_LANGUAGES }}' + CSRF_COOKIE_SECURE: "{{ DISCOVERY_CSRF_COOKIE_SECURE }}" + CORS_ORIGIN_WHITELIST: "{{ DISCOVERY_CORS_ORIGIN_WHITELIST }}" + + USERNAME_REPLACEMENT_WORKER: "{{ DISCOVERY_USERNAME_REPLACEMENT_WORKER }}" + + CELERY_BROKER_URL: "{{ DISCOVERY_CELERY_BROKER_URL }}" + +# See edx_django_service_automated_users for an example of what this should be +DISCOVERY_AUTOMATED_USERS: {} + +DISCOVERY_POST_MIGRATE_COMMANDS: [] + +DISCOVERY_CSRF_COOKIE_SECURE: false +DISCOVERY_CORS_ORIGIN_WHITELIST: [] + +DISCOVERY_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +DISCOVERY_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +DISCOVERY_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +DISCOVERY_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +DISCOVERY_ENABLE_ADMIN_URLS_RESTRICTION: false + +DISCOVERY_ADMIN_URLS: + - admin + +DISCOVERY_CELERY_DEFAULT_QUEUE: 'discovery.default' + +# Worker settings +worker_django_settings_module: "{{ DISCOVERY_DJANGO_SETTINGS_MODULE }}" +DISCOVERY_CELERY_WORKERS: + - queue: '{{ DISCOVERY_CELERY_DEFAULT_QUEUE }}' + concurrency: 1 + monitor: True +DISCOVERY_WORKERS: "{{ DISCOVERY_CELERY_WORKERS }}" diff --git a/playbooks/roles/discovery/meta/main.yml b/playbooks/roles/discovery/meta/main.yml new file mode 100644 index 00000000000..2fb288fe078 --- /dev/null +++ b/playbooks/roles/discovery/meta/main.yml @@ -0,0 +1,88 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role discovery +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: 'foo' +# my_role_var1: 'bar' +# } +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ DISCOVERY_USE_PYTHON38 }}' + 
edx_django_service_repos: '{{ DISCOVERY_REPOS }}' + edx_django_service_name: '{{ discovery_service_name }}' + edx_django_service_user: '{{ discovery_user }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ discovery_service_name }}' + edx_django_service_config_overrides: '{{ discovery_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ discovery_debian_pkgs }}' + edx_django_service_extra_requirements: '{{ DISCOVERY_EXTRA_REQUIREMENTS }}' + edx_django_service_gunicorn_port: '{{ discovery_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ DISCOVERY_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ discovery_environment }}' + edx_django_service_gunicorn_extra: '{{ DISCOVERY_GUNICORN_EXTRA }}' + edx_django_service_gunicorn_workers: '{{ DISCOVERY_GUNICORN_WORKERS }}' + edx_django_service_wsgi_name: 'course_discovery' + edx_django_service_nginx_port: '{{ DISCOVERY_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ DISCOVERY_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ DISCOVERY_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ DISCOVERY_SECRET_KEY }}' + edx_django_service_staticfiles_storage: '{{ DISCOVERY_STATICFILES_STORAGE }}' + edx_django_service_media_storage_backend: '{{ DISCOVERY_MEDIA_STORAGE_BACKEND }}' + edx_django_service_memcache: '{{ DISCOVERY_MEMCACHE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_extra_apps: '{{ DISCOVERY_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ DISCOVERY_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_node_version: '{{ DISCOVERY_NODE_VERSION }}' + edx_django_service_npm_version: '{{ DISCOVERY_NPM_VERSION }}' + edx_django_service_automated_users: '{{ DISCOVERY_AUTOMATED_USERS }}' + edx_django_service_post_migrate_commands: '{{ DISCOVERY_POST_MIGRATE_COMMANDS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ DISCOVERY_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ DISCOVERY_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ DISCOVERY_COPY_CONFIG_ENABLED }}' + edx_django_service_max_webserver_upload: 10 + edx_django_service_migration_check_services: '{{ discovery_service_name }},{{ discovery_service_name }}-workers' + edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ DISCOVERY_WORKERS }}' + EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ DISCOVERY_ENABLE_ADMIN_URLS_RESTRICTION }}' + EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ DISCOVERY_ADMIN_URLS }}' + + edx_django_service_databases: + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ DISCOVERY_DEFAULT_DB_NAME }}' + USER: '{{ DISCOVERY_MYSQL_USER }}' + PASSWORD: '{{ DISCOVERY_MYSQL_PASSWORD }}' + HOST: '{{ DISCOVERY_MYSQL }}' + PORT: 3306 + ATOMIC_REQUESTS: 'false' + CONN_MAX_AGE: '{{ DISCOVERY_MYSQL_CONN_MAX_AGE }}' + OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + read_replica: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ DISCOVERY_DEFAULT_DB_NAME }}' + USER: '{{ DISCOVERY_MYSQL_USER }}' + PASSWORD: '{{ DISCOVERY_MYSQL_PASSWORD }}' + HOST: '{{ 
DISCOVERY_MYSQL_REPLICA_HOST }}' + PORT: 3306 + ATOMIC_REQUESTS: 'false' + CONN_MAX_AGE: '{{ DISCOVERY_MYSQL_CONN_MAX_AGE }}' + OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" diff --git a/playbooks/roles/discovery/tasks/main.yml b/playbooks/roles/discovery/tasks/main.yml new file mode 100644 index 00000000000..2f950be92e3 --- /dev/null +++ b/playbooks/roles/discovery/tasks/main.yml @@ -0,0 +1,22 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role discovery +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# diff --git a/playbooks/roles/docker-tools/defaults/main.yml b/playbooks/roles/docker-tools/defaults/main.yml new file mode 100644 index 00000000000..003e6dd7342 --- /dev/null +++ b/playbooks/roles/docker-tools/defaults/main.yml @@ -0,0 +1,15 @@ +cache_valid_time: 3600 + +docker_tools_deps_deb_pkgs: + - apt-transport-https + - ca-certificates + - python3-pip + +docker_compose_pkg_url: "https://github.com/docker/compose/releases/download/v2.15.1/docker-compose-linux-x86_64" +docker_apt_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_repos: + - "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + - "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} edge" + +docker_group: "docker" +docker_users: [] diff --git a/playbooks/roles/docker-tools/meta/main.yml b/playbooks/roles/docker-tools/meta/main.yml new file mode 100644 index 00000000000..e53b9cc9399 --- /dev/null +++ b/playbooks/roles/docker-tools/meta/main.yml @@ -0,0 +1,4 @@ +--- + +dependencies: + - common diff --git a/playbooks/roles/docker-tools/tasks/main.yml b/playbooks/roles/docker-tools/tasks/main.yml new file mode 100644 index 00000000000..24166a97b53 --- /dev/null +++ b/playbooks/roles/docker-tools/tasks/main.yml @@ -0,0 +1,106 @@ +# Install docker-engine and docker-compose +# Add users to docker group +--- +- name: add docker group + group: + name: "{{ docker_group }}" + tags: + - install + - install:base + +- name: add users to docker group + user: + name: "{{ item }}" + groups: "{{ docker_group }}" + append: yes + with_items: "{{ docker_users }}" + tags: + - install + - install:base + +- name: install package dependencies + apt: + name: "{{ docker_tools_deps_deb_pkgs }}" + update_cache: yes + cache_valid_time: "{{ cache_valid_time }}" + tags: + - install + - install:system-requirements + when: ansible_distribution_release != 'focal' + +- name: Docker repo Ubuntu Focal + when: ansible_distribution_release == 'focal' + block: + - name: add docker apt key Ubuntu Focal + apt_key: + url: "{{ docker_apt_key_url }}" + tags: + - install + - install:configuration + + - name: add docker repo Ubuntu Focal + ansible.builtin.apt_repository: + repo: "{{ item }}" + filename: 'docker' + with_items: "{{ docker_repos }}" + tags: + - install + - install:configuration + +- name: Docker repo post Ubuntu Focal + when: ansible_distribution_release != 'focal' + block: + - name: download docker repo key post Ubuntu Focal + ansible.builtin.get_url: + url: "{{ docker_apt_key_url }}" + dest: /etc/apt/keyrings/docker.asc + +# Apt can handle 
armored .asc files, but the official docker instructions tell people to dearmor the key. +# If we try to use the .asc file on a box with docker already installed, ansible will create a duplicate +# entry and apt will complain with the error "Conflicting values set for option Signed-By..." - name: Dearmor docker repo gpg key post Ubuntu Focal + ansible.builtin.shell: + cmd: 'gpg --dearmor < /etc/apt/keyrings/docker.asc > /etc/apt/keyrings/docker.gpg' + + - name: add docker repo post Ubuntu Focal + apt_repository: + repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + filename: 'docker' + tags: + - install + - install:configuration + +- name: install docker-engine + apt: + name: ["docker-ce", "docker-ce-cli", "containerd.io", "docker-compose-plugin"] + update_cache: yes + tags: + - install + - install:system-requirements + +- name: Are we in a Docker container + shell: echo $(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + tags: + - install + - install:base + +- name: start docker service + service: + name: docker + enabled: yes + state: started + when: docker_container.stdout != 'yes' + tags: + - install + - install:configuration + +- name: install docker-compose + get_url: + dest: /usr/local/bin/docker-compose + url: "{{ docker_compose_pkg_url }}" + mode: 0755 + tags: + - install + - install:system-requirements diff --git a/playbooks/roles/ecommerce/defaults/main.yml b/playbooks/roles/ecommerce/defaults/main.yml new file mode 100644 index 00000000000..91217930af2 --- /dev/null +++ b/playbooks/roles/ecommerce/defaults/main.yml @@ -0,0 +1,365 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role ecommerce +# + +ECOMMERCE_GIT_IDENTITY: !!null + +ECOMMERCE_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: "ecommerce.git" + VERSION: "{{ ECOMMERCE_VERSION }}" + DESTINATION: "{{ ecommerce_code_dir }}" + SSH_KEY: "{{ ECOMMERCE_GIT_IDENTITY }}" + +# List of additional python packages that should be installed into the +# ecommerce virtual environment. +# `name` (required), `version` (optional), and `extra_args` (optional) +# are supported and correspond to the options of ansible's pip module. 
+# Example: +# ECOMMERCE_EXTRA_REQUIREMENTS: +# - name: mypackage +# version: 1.0.1 +# - name: git+https://git.myproject.org/MyProject#egg=MyProject +ECOMMERCE_EXTRA_REQUIREMENTS: [] +ECOMMERCE_ADD_EXTRA_REQUIREMENTS_TO_REQUIREMENTS_FILE: false + +# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC +# and a key being provided via NEWRELIC_LICENSE_KEY +ECOMMERCE_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ ecommerce_service_name }}" +ECOMMERCE_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}" +ECOMMERCE_NGINX_PORT: 18130 +ECOMMERCE_SSL_NGINX_PORT: 48130 + +ECOMMERCE_MEMCACHE: + - localhost:11211 + +ECOMMERCE_DATABASE_NAME: ecommerce +ECOMMERCE_DATABASE_USER: ecomm001 +ECOMMERCE_DATABASE_PASSWORD: password +ECOMMERCE_DATABASE_HOST: localhost +ECOMMERCE_DATABASE_CONN_MAX_AGE: 60 + +ECOMMERCE_VERSION: "master" +ECOMMERCE_DJANGO_SETTINGS_MODULE: "ecommerce.settings.production" + +ECOMMERCE_SESSION_EXPIRE_AT_BROWSER_CLOSE: false +ECOMMERCE_SECRET_KEY: 'Your secret key here' +ECOMMERCE_LANGUAGE_CODE: 'en' +ECOMMERCE_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' +ECOMMERCE_EDX_API_KEY: 'PUT_YOUR_API_KEY_HERE' # This should match the value set for edxapp +ECOMMERCE_ECOMMERCE_URL_ROOT: 'http://localhost:8002/' +ECOMMERCE_LOGOUT_URL: '{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}/logout/' +ECOMMERCE_LMS_URL_ROOT: 'http://127.0.0.1:8000/' +ECOMMERCE_JWT_ALGORITHM: 'HS256' +ECOMMERCE_JWT_VERIFY_EXPIRATION: true +ECOMMERCE_JWT_ISSUERS: + - ISSUER: "{{ COMMON_JWT_ISSUER }}" + AUDIENCE: "{{ COMMON_JWT_AUDIENCE }}" + SECRET_KEY: "{{ COMMON_JWT_SECRET_KEY }}" + - ISSUER: 'ecommerce_worker' # Must match the value of JWT_ISSUER configured for the ecommerce worker. + AUDIENCE: "{{ COMMON_JWT_AUDIENCE }}" + SECRET_KEY: "{{ COMMON_JWT_SECRET_KEY }}" + +ECOMMERCE_JWT_LEEWAY: 1 + +ECOMMERCE_ENROLLMENT_FULFILLMENT_TIMEOUT: 7 +ECOMMERCE_LOGGING_ROOT_OVERRIDES: {} +ECOMMERCE_LOGGING_SUBSECTION_OVERRIDES: {} + +# Needed to link to the payment micro-frontend. +ECOMMERCE_PAYMENT_MICROFRONTEND_URL: !!null + +# Sailthru +ECOMMERCE_SAILTHRU_KEY: 'sailthru key here' +ECOMMERCE_SAILTHRU_SECRET: 'sailthru secret here' + +# Used to automatically configure OAuth2 Client +ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'ecommerce-sso-key' +ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'ecommerce-sso-secret' +ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'ecommerce-backend-service-key' +ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'ecommerce-backend-service-secret' +ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +# Settings for affiliate cookie tracking +ECOMMERCE_AFFILIATE_COOKIE_NAME: '{{ EDXAPP_AFFILIATE_COOKIE_NAME | default("dev_affiliate_id") }}' + +ECOMMERCE_OSCAR_DEFAULT_CURRENCY: 'USD' +ECOMMERCE_OSCAR_FROM_EMAIL: 'oscar@example.com' + +# NOTE: The contents of the certificates should be set in private configuration +ecommerce_apple_pay_merchant_certificate_directory: '/edx/etc/ssl' +ecommerce_apple_pay_merchant_certificate_filename: 'apple_pay_merchant.pem' +ecommerce_apple_pay_merchant_certificate_path: '{{ ecommerce_apple_pay_merchant_certificate_directory }}/{{ ecommerce_apple_pay_merchant_certificate_filename }}' +ECOMMERCE_APPLE_PAY_MERCHANT_CERTIFICATE: | + Your PEM file, containing a public and private key, + should be set in private configuration. This is how you + implement a multi-line string in YAML. +ECOMMERCE_APPLE_PAY_MERCHANT_ID_DOMAIN_ASSOCIATION: | + This value should also be in private configuration. It, too, + will span multiple lines. 
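+# A minimal sketch of how private configuration might override the two values above +# (the PEM markers below are illustrative placeholders, not real certificate data): +# +# ECOMMERCE_APPLE_PAY_MERCHANT_CERTIFICATE: | +#   -----BEGIN CERTIFICATE----- +#   ...base64-encoded certificate... +#   -----END CERTIFICATE----- +#   -----BEGIN PRIVATE KEY----- +#   ...base64-encoded private key... +#   -----END PRIVATE KEY-----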
+ECOMMERCE_APPLE_PAY_MERCHANT_IDENTIFIER: 'merchant.com.example' +ECOMMERCE_APPLE_PAY_COUNTRY_CODE: 'US' + +# CyberSource related +ECOMMERCE_CYBERSOURCE_PROFILE_ID: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_MERCHANT_ID: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_ACCESS_KEY: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_SECRET_KEY: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_SOP_ACCESS_KEY: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_SOP_PROFILE_ID: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_SOP_SECRET_KEY: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_SOP_PAYMENT_PAGE_URL: 'https://testsecureacceptance.cybersource.com/silent/pay' +ECOMMERCE_CYBERSOURCE_TRANSACTION_KEY: 'SET-ME-PLEASE' +ECOMMERCE_CYBERSOURCE_PAYMENT_PAGE_URL: 'https://testsecureacceptance.cybersource.com/pay' +ECOMMERCE_CYBERSOURCE_RECEIPT_PAGE_URL: '/checkout/receipt/' +ECOMMERCE_CYBERSOURCE_CANCEL_PAGE_URL: '/checkout/cancel-checkout/' +ECOMMERCE_CYBERSOURCE_SEND_LEVEL_2_3_DETAILS: true +ECOMMERCE_CYBERSOURCE_SOAP_API_URL: 'https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.140.wsdl' + +# PayPal +ECOMMERCE_PAYPAL_MODE: 'sandbox' +ECOMMERCE_PAYPAL_CLIENT_ID: 'SET-ME-PLEASE' +ECOMMERCE_PAYPAL_CLIENT_SECRET: 'SET-ME-PLEASE' +ECOMMERCE_PAYPAL_RECEIPT_URL: '/checkout/receipt/' +ECOMMERCE_PAYPAL_CANCEL_URL: '/checkout/cancel-checkout/' +ECOMMERCE_PAYPAL_ERROR_URL: '/checkout/error/' + +ECOMMERCE_PAYMENT_PROCESSOR_CONFIG: + edx: + cybersource: + merchant_id: '{{ ECOMMERCE_CYBERSOURCE_MERCHANT_ID }}' + profile_id: '{{ ECOMMERCE_CYBERSOURCE_PROFILE_ID }}' + access_key: '{{ ECOMMERCE_CYBERSOURCE_ACCESS_KEY }}' + secret_key: '{{ ECOMMERCE_CYBERSOURCE_SECRET_KEY }}' + transaction_key: '{{ ECOMMERCE_CYBERSOURCE_TRANSACTION_KEY }}' + payment_page_url: '{{ ECOMMERCE_CYBERSOURCE_PAYMENT_PAGE_URL }}' + receipt_page_url: '{{ ECOMMERCE_CYBERSOURCE_RECEIPT_PAGE_URL }}' + cancel_page_url: '{{ ECOMMERCE_CYBERSOURCE_CANCEL_PAGE_URL }}' + soap_api_url: '{{ ECOMMERCE_CYBERSOURCE_SOAP_API_URL }}' + send_level_2_3_details: '{{ ECOMMERCE_CYBERSOURCE_SEND_LEVEL_2_3_DETAILS }}' + sop_profile_id: '{{ ECOMMERCE_CYBERSOURCE_SOP_PROFILE_ID }}' + sop_access_key: '{{ ECOMMERCE_CYBERSOURCE_SOP_ACCESS_KEY }}' + sop_secret_key: '{{ ECOMMERCE_CYBERSOURCE_SOP_SECRET_KEY }}' + sop_payment_page_url: '{{ ECOMMERCE_CYBERSOURCE_SOP_PAYMENT_PAGE_URL }}' + # NOTE: These are simple placeholders meant to show what keys are needed for Apple Pay. These values + # should be overwritten in private configuration. 
+ apple_pay_merchant_identifier: '{{ ECOMMERCE_APPLE_PAY_MERCHANT_IDENTIFIER }}' + apple_pay_merchant_id_domain_association: '{{ ECOMMERCE_APPLE_PAY_MERCHANT_ID_DOMAIN_ASSOCIATION }}' + apple_pay_merchant_id_certificate_path: '{{ ecommerce_apple_pay_merchant_certificate_path }}' + apple_pay_country_code: '{{ ECOMMERCE_APPLE_PAY_COUNTRY_CODE }}' + paypal: + mode: '{{ ECOMMERCE_PAYPAL_MODE }}' + client_id: '{{ ECOMMERCE_PAYPAL_CLIENT_ID }}' + client_secret: '{{ ECOMMERCE_PAYPAL_CLIENT_SECRET }}' + receipt_url: '{{ ECOMMERCE_PAYPAL_RECEIPT_URL }}' + cancel_checkout_path: '{{ ECOMMERCE_PAYPAL_CANCEL_URL }}' + error_url: '{{ ECOMMERCE_PAYPAL_ERROR_URL }}' + +# JWT payload user attribute mapping +ECOMMERCE_EDX_DRF_EXTENSIONS: + JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING: + administrator: 'is_staff' + email: 'email' + full_name: 'full_name' + tracking_context: 'tracking_context' + user_id: 'lms_user_id' + JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES: + - 'tracking_context' + OAUTH2_USER_INFO_URL: '{{ edx_django_service_oauth2_url_root }}/user_info' + +# Theming +ECOMMERCE_PLATFORM_NAME: 'Your Platform Name Here' +ECOMMERCE_THEME_SCSS: 'sass/themes/default.scss' +ECOMMERCE_COMPREHENSIVE_THEME_DIRS: + - '{{ THEMES_CODE_DIR }}/{{ ecommerce_service_name }}' + - '{{ COMMON_APP_DIR }}/{{ ecommerce_service_name }}/{{ ecommerce_service_name }}/ecommerce/themes' + +ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING: false +ECOMMERCE_DEFAULT_SITE_THEME: !!null +ECOMMERCE_STATICFILES_STORAGE: 'ecommerce.theming.storage.ThemeStorage' + +# Celery +ECOMMERCE_BROKER_USERNAME: '' +ECOMMERCE_BROKER_PASSWORD: 'celery' +ECOMMERCE_BROKER_HOST: '{{ ansible_default_ipv4.address }}' +ECOMMERCE_BROKER_PORT: 6379 +ECOMMERCE_BROKER_TRANSPORT: 'redis' +ECOMMERCE_BROKER_URL: '{{ ECOMMERCE_BROKER_TRANSPORT }}://{{ ECOMMERCE_BROKER_USERNAME }}:{{ ECOMMERCE_BROKER_PASSWORD }}@{{ ECOMMERCE_BROKER_HOST }}:{{ ECOMMERCE_BROKER_PORT }}' + +ECOMMERCE_DISCOVERY_SERVICE_URL: 'http://localhost:8008/' +ECOMMERCE_ENTERPRISE_URL: '{{ ECOMMERCE_LMS_URL_ROOT }}' + +ECOMMERCE_CORS_ORIGIN_WHITELIST: [] +ECOMMERCE_CSRF_TRUSTED_ORIGINS: [] +ECOMMERCE_CORS_URLS_REGEX: '' +ECOMMERCE_CORS_ALLOW_CREDENTIALS: false + +ECOMMERCE_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + +ecommerce_config: + LANGUAGE_COOKIE_NAME: '{{ ECOMMERCE_LANGUAGE_COOKIE_NAME }}' + EDX_API_KEY: '{{ ECOMMERCE_EDX_API_KEY }}' + OSCAR_DEFAULT_CURRENCY: '{{ ECOMMERCE_OSCAR_DEFAULT_CURRENCY }}' + OSCAR_FROM_EMAIL: '{{ ECOMMERCE_OSCAR_FROM_EMAIL }}' + + ENTERPRISE_SERVICE_URL: '{{ ECOMMERCE_ENTERPRISE_URL }}/enterprise/' + ECOMMERCE_URL_ROOT: '{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}' + + # TODO LEARNER-3041: Update this service and ecomworker to only use the central JWT access token issuer + JWT_AUTH: + JWT_SECRET_KEY: '{{ COMMON_JWT_SECRET_KEY }}' + JWT_ALGORITHM: '{{ ECOMMERCE_JWT_ALGORITHM }}' + JWT_VERIFY_EXPIRATION: '{{ ECOMMERCE_JWT_VERIFY_EXPIRATION }}' + JWT_LEEWAY: '{{ ECOMMERCE_JWT_LEEWAY }}' + JWT_ISSUERS: '{{ ECOMMERCE_JWT_ISSUERS }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + + EDX_DRF_EXTENSIONS: '{{ ECOMMERCE_EDX_DRF_EXTENSIONS }}' + + AFFILIATE_COOKIE_KEY: '{{ ECOMMERCE_AFFILIATE_COOKIE_NAME }}' + + PAYMENT_PROCESSOR_CONFIG: '{{ ECOMMERCE_PAYMENT_PROCESSOR_CONFIG }}' + + PLATFORM_NAME: '{{ ECOMMERCE_PLATFORM_NAME }}' + THEME_SCSS: '{{ ECOMMERCE_THEME_SCSS }}' + + BROKER_URL: '{{ 
ECOMMERCE_BROKER_URL }}' + + SAILTHRU_KEY: '{{ ECOMMERCE_SAILTHRU_KEY }}' + SAILTHRU_SECRET: '{{ ECOMMERCE_SAILTHRU_SECRET }}' + + # Theming config + COMPREHENSIVE_THEME_DIRS: "{{ ECOMMERCE_COMPREHENSIVE_THEME_DIRS }}" + ENABLE_COMPREHENSIVE_THEMING: "{{ ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING }}" + DEFAULT_SITE_THEME: "{{ ECOMMERCE_DEFAULT_SITE_THEME }}" + CSRF_COOKIE_SECURE: "{{ ECOMMERCE_CSRF_COOKIE_SECURE }}" + SESSION_COOKIE_SECURE: '{{ ECOMMERCE_SESSION_COOKIE_SECURE }}' + + CORS_ORIGIN_WHITELIST: "{{ ECOMMERCE_CORS_ORIGIN_WHITELIST }}" + CSRF_TRUSTED_ORIGINS: "{{ ECOMMERCE_CSRF_TRUSTED_ORIGINS }}" + CORS_URLS_REGEX: "{{ ECOMMERCE_CORS_URLS_REGEX }}" + CORS_ALLOW_CREDENTIALS: "{{ ECOMMERCE_CORS_ALLOW_CREDENTIALS }}" + + USERNAME_REPLACEMENT_WORKER: "{{ ECOMMERCE_USERNAME_REPLACEMENT_WORKER }}" + + PAYMENT_MICROFRONTEND_URL: "{{ ECOMMERCE_PAYMENT_MICROFRONTEND_URL }}" + + ENROLLMENT_FULFILLMENT_TIMEOUT: "{{ ECOMMERCE_ENROLLMENT_FULFILLMENT_TIMEOUT }}" + LOGGING_ROOT_OVERRIDES: "{{ ECOMMERCE_LOGGING_ROOT_OVERRIDES }}" + LOGGING_SUBSECTION_OVERRIDES: "{{ ECOMMERCE_LOGGING_SUBSECTION_OVERRIDES }}" + +# Keys set here are merged over ecommerce_config by the combine filter below, so they win on conflict. +ECOMMERCE_EXTRA_CONFIG_OVERRIDES: {} +ecommerce_service_config_overrides: "{{ ecommerce_config | combine(ECOMMERCE_EXTRA_CONFIG_OVERRIDES) }}" + +ECOMMERCE_GUNICORN_WORKER_CLASS: "sync" +ECOMMERCE_GUNICORN_MAX_REQUESTS: 3000 +ECOMMERCE_GUNICORN_EXTRA: "" +ECOMMERCE_GUNICORN_WORKERS: 2 + +ECOMMERCE_CSRF_COOKIE_SECURE: false + +ECOMMERCE_SESSION_COOKIE_SECURE: true + +# See edx_django_service_automated_users for an example of what this should be +ECOMMERCE_AUTOMATED_USERS: {} + +# Remote config +ECOMMERCE_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ECOMMERCE_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ECOMMERCE_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# MFEs default settings +ECOMMERCE_ENABLE_PAYMENT_MFE: false + +# +# vars are namespaced with the module name. 
+# +ecommerce_service_name: "ecommerce" +ecommerce_user: "{{ ecommerce_service_name }}" +ecommerce_home: "{{ COMMON_APP_DIR }}/{{ ecommerce_service_name }}" +ecommerce_code_dir: "{{ ecommerce_home }}/{{ ecommerce_service_name }}" +ecommerce_venv_dir: "{{ ecommerce_home }}/venvs/{{ ecommerce_service_name }}" + +ecommerce_gunicorn_port: "8130" + +ecommerce_environment: + ECOMMERCE_CFG: "{{ COMMON_CFG_DIR }}/{{ ecommerce_service_name }}.yml" + +ecommerce_create_demo_data: false + +# Antivirus +ECOMMERCE_ENABLE_ANTIVIRUS: false +ECOMMERCE_ANTIVIRUS_SCAN_DIRECTORY: "{{ ecommerce_code_dir }}" + +ECOMMERCE_ENABLE_ADMIN_URLS_RESTRICTION: false + +ECOMMERCE_ADMIN_URLS: + - admin + +ECOMMERCE_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Python 3 conversion +ECOMMERCE_USE_PYTHON38: true + +# ECOM Node and Npm Version +ECOMMERCE_NODE_VERSION: '16.14.0' +ECOMMERCE_NPM_VERSION: '8.5.5' + +# +# OS packages +# + +ecommerce_debian_pkgs: + - libmysqlclient-dev + - libjpeg-dev + - libssl-dev + - libffi-dev + - libsqlite3-dev + - python3-dev + +ecommerce_release_specific_debian_pkgs: + xenial: + - python-dev + bionic: + - python-dev + focal: [] + +ecommerce_redhat_pkgs: [] + +ecommerce_post_migrate_commands: + - command: './manage.py oscar_populate_countries --initial-only' + when: true + - command: > + ./manage.py create_or_update_site + --site-id=1 + --site-domain={{ ECOMMERCE_ECOMMERCE_URL_ROOT.split("://")[1] }} + --partner-code=edX --partner-name="Open edX" + --lms-url-root={{ ECOMMERCE_LMS_URL_ROOT }} + --client-side-payment-processor=cybersource + --payment-processors=cybersource,paypal + --sso-client-id={{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY }} + --sso-client-secret={{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET }} + --backend-service-client-id={{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY }} + --backend-service-client-secret={{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET }} + --from-email staff@example.com + --discovery_api_url={{ ECOMMERCE_DISCOVERY_SERVICE_URL }}/api/v1/ + {{ " --enable-microfrontend-for-basket-page=true" if ECOMMERCE_ENABLE_PAYMENT_MFE else "" }} + {{ " --payment-microfrontend-url="~EDXAPP_LMS_BASE_SCHEME~"://"~MFE_BASE~"/payment" if ECOMMERCE_ENABLE_PAYMENT_MFE else "" }} + + when: '{{ ecommerce_create_demo_data }}' + - command: './manage.py create_demo_data --partner=edX' + when: '{{ ecommerce_create_demo_data }}' diff --git a/playbooks/roles/ecommerce/meta/main.yml b/playbooks/roles/ecommerce/meta/main.yml new file mode 100644 index 00000000000..504deab4ce5 --- /dev/null +++ b/playbooks/roles/ecommerce/meta/main.yml @@ -0,0 +1,73 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role ecommerce +# +dependencies: + - role: edx_themes + theme_users: + - '{{ ecommerce_user }}' + when: ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING + - role: edx_django_service + edx_django_service_version: '{{ ECOMMERCE_VERSION }}' + edx_django_service_name: '{{ ecommerce_service_name }}' + edx_django_service_config_overrides: '{{ ecommerce_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ ecommerce_debian_pkgs + ecommerce_release_specific_debian_pkgs[ansible_distribution_release] }}' + edx_django_service_django_settings_module: '{{ ECOMMERCE_DJANGO_SETTINGS_MODULE }}' + 
edx_django_service_extra_requirements: '{{ ECOMMERCE_EXTRA_REQUIREMENTS }}' + edx_django_service_add_extra_requirements_to_requirements_file: '{{ ECOMMERCE_ADD_EXTRA_REQUIREMENTS_TO_REQUIREMENTS_FILE }}' + edx_django_service_repos: '{{ ECOMMERCE_REPOS }}' + edx_django_service_environment_extra: '{{ ecommerce_environment }}' + edx_django_service_gunicorn_extra: '{{ ECOMMERCE_GUNICORN_EXTRA }}' + edx_django_service_gunicorn_port: '{{ ecommerce_gunicorn_port }}' + edx_django_service_gunicorn_worker_class: "{{ ECOMMERCE_GUNICORN_WORKER_CLASS }}" + edx_django_service_gunicorn_max_requests: "{{ ECOMMERCE_GUNICORN_MAX_REQUESTS }}" + edx_django_service_gunicorn_workers: "{{ ECOMMERCE_GUNICORN_WORKERS }}" + edx_django_service_nginx_port: '{{ ECOMMERCE_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ ECOMMERCE_SSL_NGINX_PORT }}' + edx_django_service_use_python38: '{{ ECOMMERCE_USE_PYTHON38 }}' + edx_django_service_language_code: '{{ ECOMMERCE_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ ECOMMERCE_SECRET_KEY }}' + edx_django_service_memcache: '{{ ECOMMERCE_MEMCACHE }}' + edx_django_service_node_version: '{{ ECOMMERCE_NODE_VERSION }}' + edx_django_service_npm_version: '{{ ECOMMERCE_NPM_VERSION }}' + edx_django_service_databases: + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ ECOMMERCE_DATABASE_NAME}}' + USER: '{{ ECOMMERCE_DATABASE_USER }}' + PASSWORD: '{{ ECOMMERCE_DATABASE_PASSWORD }}' + HOST: '{{ ECOMMERCE_DATABASE_HOST }}' + PORT: 3306 + ATOMIC_REQUESTS: true + CONN_MAX_AGE: '{{ ECOMMERCE_DATABASE_CONN_MAX_AGE }}' + OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + edx_django_service_social_auth_edx_oauth2_key: '{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_social_auth_redirect_is_https: '{{ ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' + edx_django_service_session_expire_at_browser_close: '{{ ECOMMERCE_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_staticfiles_storage: '{{ ECOMMERCE_STATICFILES_STORAGE }}' + edx_django_service_post_migrate_commands: '{{ ecommerce_post_migrate_commands }}' + edx_django_service_basic_auth_exempted_paths_extra: + - payment + - \.well-known/apple-developer-merchantid-domain-association + edx_django_service_automated_users: '{{ ECOMMERCE_AUTOMATED_USERS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ ECOMMERCE_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ ECOMMERCE_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ ECOMMERCE_COPY_CONFIG_ENABLED }}' + EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ ECOMMERCE_ENABLE_ADMIN_URLS_RESTRICTION }}' + EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ ECOMMERCE_ADMIN_URLS }}' + - role: antivirus + ANTIVIRUS_SCAN_DIRECTORY: "{{ ECOMMERCE_ANTIVIRUS_SCAN_DIRECTORY }}" + when: ECOMMERCE_ENABLE_ANTIVIRUS diff --git a/playbooks/roles/ecommerce/tasks/main.yml b/playbooks/roles/ecommerce/tasks/main.yml new file mode 100644 index 00000000000..685c20f2a60 --- /dev/null +++ b/playbooks/roles/ecommerce/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: Create Apple Pay certificates directory + file: + path: "{{ ecommerce_apple_pay_merchant_certificate_directory }}" + state: 
directory + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + +- name: Write Apple Pay merchant certificates + copy: + content: "{{ ECOMMERCE_APPLE_PAY_MERCHANT_CERTIFICATE }}" + dest: "{{ ecommerce_apple_pay_merchant_certificate_path }}" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: "0644" + no_log: true + tags: + - install + - install:configuration diff --git a/playbooks/roles/ecomworker/defaults/main.yml b/playbooks/roles/ecomworker/defaults/main.yml new file mode 100644 index 00000000000..575df70021c --- /dev/null +++ b/playbooks/roles/ecomworker/defaults/main.yml @@ -0,0 +1,166 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Defaults for role ecomworker. +# + +ECOMMERCE_WORKER_GIT_IDENTITY: !!null +ECOMMERCE_WORKER_VERSION: 'master' +ECOMMERCE_WORKER_USE_PYTHON38: true + +ECOMMERCE_WORKER_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: ecommerce-worker.git + VERSION: '{{ ECOMMERCE_WORKER_VERSION }}' + DESTINATION: '{{ ecommerce_worker_code_dir }}' + SSH_KEY: '{{ ECOMMERCE_WORKER_GIT_IDENTITY }}' + +# Requires that New Relic be enabled via COMMON_ENABLE_NEWRELIC, and that +# a key be provided via NEWRELIC_LICENSE_KEY. +ECOMMERCE_WORKER_NEWRELIC_APPNAME: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ ecommerce_worker_service_name }}' +ECOMMERCE_WORKER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# CELERY +ECOMMERCE_WORKER_BROKER_USERNAME: '' +ECOMMERCE_WORKER_BROKER_PASSWORD: 'celery' +# Used as the default broker (Redis) host. +ECOMMERCE_WORKER_BROKER_HOST: '{{ ansible_default_ipv4.address }}' +# Used as the default broker (Redis) port. +ECOMMERCE_WORKER_BROKER_PORT: 6379 +ECOMMERCE_WORKER_BROKER_TRANSPORT: 'redis' +# Default broker URL. See http://celery.readthedocs.org/en/latest/configuration.html#broker-url. +ECOMMERCE_WORKER_BROKER_URL: '{{ ECOMMERCE_WORKER_BROKER_TRANSPORT }}://{{ ECOMMERCE_WORKER_BROKER_USERNAME }}:{{ ECOMMERCE_WORKER_BROKER_PASSWORD }}@{{ ECOMMERCE_WORKER_BROKER_HOST }}:{{ ECOMMERCE_WORKER_BROKER_PORT }}' +ECOMMERCE_WORKER_CONCURRENCY: 4 +# END CELERY + +# ORDER FULFILLMENT +# Absolute URL used to construct API calls against the ecommerce service. +ECOMMERCE_WORKER_ECOMMERCE_API_ROOT: 'http://127.0.0.1:8002/api/v2/' + +# Long-lived access token used by Celery workers to authenticate against the ecommerce service. +ECOMMERCE_WORKER_WORKER_ACCESS_TOKEN: 'your-secret-here' + +# Maximum number of retries before giving up on the fulfillment of an order. +# For reference, 11 retries with exponential backoff yields a maximum waiting +# time of 2047 seconds (about 30 minutes). Defaulting this to None could yield +# unwanted behavior: infinite retries. +ECOMMERCE_WORKER_MAX_FULFILLMENT_RETRIES: 11 +# END ORDER FULFILLMENT + +# SAILTHRU INTEGRATION +# Set to false to ignore Sailthru events +# Sailthru support in ecommerce_worker sends purchase/enroll events to the email +# marketing system Sailthru for tracking the efficacy of email marketing campaigns. It does not +# have to be enabled for normal ecommerce operation. 
If it is enabled, the waffle switch +# sailthru_enable should be set to on in ecommerce as well, or ecommerce won't send +# purchase/enroll events to ecommerce_worker. +ECOMMERCE_WORKER_SAILTHRU_ENABLE: false + +# Template used when a user upgrades to verified +ECOMMERCE_WORKER_SAILTHRU_UPGRADE_TEMPLATE: !!null + +# Template used when a user purchases a course +ECOMMERCE_WORKER_SAILTHRU_PURCHASE_TEMPLATE: !!null + +# Template used when a user enrolls in a free course +ECOMMERCE_WORKER_SAILTHRU_ENROLL_TEMPLATE: !!null + +# Abandoned cart template +ECOMMERCE_WORKER_SAILTHRU_ABANDONED_CART_TEMPLATE: !!null + +# Minutes to delay before sending the abandoned cart message +ECOMMERCE_WORKER_SAILTHRU_ABANDONED_CART_DELAY: 60 + +# Sailthru key and secret required for integration +# Note: stage keys/secret should be taken from the Sailthru edX Dev account; prod should be from edX.org +ECOMMERCE_WORKER_SAILTHRU_KEY: 'sailthru key here' +ECOMMERCE_WORKER_SAILTHRU_SECRET: 'sailthru secret here' + +# Retry settings for Sailthru celery tasks +ECOMMERCE_WORKER_SAILTHRU_RETRY_SECONDS: 3600 +ECOMMERCE_WORKER_SAILTHRU_RETRY_ATTEMPTS: 6 + +# TTL for cached course content from Sailthru (in seconds) +ECOMMERCE_WORKER_SAILTHRU_CACHE_TTL_SECONDS: 3600 + +# Dummy price for audit/honor (i.e., if cost = 0) +# Note: setting this value to 0 skips Sailthru calls for free transactions +ECOMMERCE_WORKER_SAILTHRU_MINIMUM_COST: 100 +# END SAILTHRU INTEGRATION + +# Ecommerce Worker settings +ECOMMERCE_WORKER_JWT_SECRET_KEY: 'insecure-secret-key' +ECOMMERCE_WORKER_JWT_ISSUER: 'ecommerce_worker' +ECOMMERCE_WORKER_SITE_OVERRIDES: !!null + +# Remote config +ECOMMERCE_WORKER_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ECOMMERCE_WORKER_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ECOMMERCE_WORKER_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +ECOMMERCE_WORKER_SERVICE_CONFIG: + BROKER_URL: '{{ ECOMMERCE_WORKER_BROKER_URL }}' + ECOMMERCE_API_ROOT: '{{ ECOMMERCE_WORKER_ECOMMERCE_API_ROOT }}' + JWT_SECRET_KEY: '{{ ECOMMERCE_WORKER_JWT_SECRET_KEY }}' + JWT_ISSUER: '{{ ECOMMERCE_WORKER_JWT_ISSUER }}' + MAX_FULFILLMENT_RETRIES: '{{ ECOMMERCE_WORKER_MAX_FULFILLMENT_RETRIES }}' + SAILTHRU: + SAILTHRU_ENABLE: '{{ ECOMMERCE_WORKER_SAILTHRU_ENABLE }}' + SAILTHRU_UPGRADE_TEMPLATE: '{{ ECOMMERCE_WORKER_SAILTHRU_UPGRADE_TEMPLATE }}' + SAILTHRU_PURCHASE_TEMPLATE: '{{ ECOMMERCE_WORKER_SAILTHRU_PURCHASE_TEMPLATE }}' + SAILTHRU_ENROLL_TEMPLATE: '{{ ECOMMERCE_WORKER_SAILTHRU_ENROLL_TEMPLATE }}' + SAILTHRU_ABANDONED_CART_TEMPLATE: '{{ ECOMMERCE_WORKER_SAILTHRU_ABANDONED_CART_TEMPLATE }}' + SAILTHRU_ABANDONED_CART_DELAY: '{{ ECOMMERCE_WORKER_SAILTHRU_ABANDONED_CART_DELAY }}' + SAILTHRU_KEY: '{{ ECOMMERCE_WORKER_SAILTHRU_KEY }}' + SAILTHRU_SECRET: '{{ ECOMMERCE_WORKER_SAILTHRU_SECRET }}' + SAILTHRU_RETRY_SECONDS: '{{ ECOMMERCE_WORKER_SAILTHRU_RETRY_SECONDS }}' + SAILTHRU_RETRY_ATTEMPTS: '{{ ECOMMERCE_WORKER_SAILTHRU_RETRY_ATTEMPTS }}' + SAILTHRU_CACHE_TTL_SECONDS: '{{ ECOMMERCE_WORKER_SAILTHRU_CACHE_TTL_SECONDS }}' + SAILTHRU_MINIMUM_COST: '{{ ECOMMERCE_WORKER_SAILTHRU_MINIMUM_COST }}' + templates: + course_refund: 'Course Refund' + assignment_email: 'Offer Assignment Email' + + # Site-specific configuration overrides. Implemented as a dict of dicts with 'site_code' for keys. + # Ecommerce worker will apply these settings instead of their corresponding default values.
+ # For example: + # SITE_OVERRIDES: { + # "site1": { + # "ECOMMERCE_API_ROOT": "http://ecommerce-subdomain.domain.com/" + # }, + # "site2": { + # "JWT_SECRET_KEY": "site2-secret-key", + # "JWT_ISSUER": "site2-worker" + # } + # } + SITE_OVERRIDES: '{{ ECOMMERCE_WORKER_SITE_OVERRIDES }}' + + +ecommerce_worker_environment: + WORKER_CONFIGURATION_MODULE: 'ecommerce_worker.configuration.production' + ECOMMERCE_WORKER_CFG: '{{ COMMON_CFG_DIR }}/{{ ecommerce_worker_service_name }}.yml' + +ecommerce_worker_service_name: 'ecomworker' +ecommerce_worker_user: '{{ ecommerce_worker_service_name }}' +ecommerce_worker_home: '{{ COMMON_APP_DIR }}/{{ ecommerce_worker_service_name }}' +ecommerce_worker_code_dir: '{{ ecommerce_worker_home }}/{{ ecommerce_worker_service_name }}' + +ecommerce_worker_log_dir: '{{ COMMON_LOG_DIR }}/{{ ecommerce_worker_service_name }}' + +ecommerce_worker_requirements_base: '{{ ecommerce_worker_code_dir }}/requirements' +ecommerce_worker_requirements: + - production.txt + - optional.txt + +# OS packages +ecommerce_worker_debian_pkgs: [] +ecommerce_worker_redhat_pkgs: [] diff --git a/playbooks/roles/ecomworker/meta/main.yml b/playbooks/roles/ecomworker/meta/main.yml new file mode 100644 index 00000000000..851b97f7976 --- /dev/null +++ b/playbooks/roles/ecomworker/meta/main.yml @@ -0,0 +1,27 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Includes for role ecommerce_worker. +# + +dependencies: + - common + - supervisor + - role: edx_service + edx_service_use_python38: '{{ ECOMMERCE_WORKER_USE_PYTHON38 }}' + edx_service_name: "{{ ecommerce_worker_service_name }}" + edx_service_config: "{{ ECOMMERCE_WORKER_SERVICE_CONFIG }}" + edx_service_repos: "{{ ECOMMERCE_WORKER_REPOS }}" + edx_service_user: "{{ ecommerce_worker_user }}" + edx_service_home: "{{ ecommerce_worker_home }}" + edx_service_packages: + debian: "{{ ecommerce_worker_debian_pkgs }}" + redhat: "{{ ecommerce_worker_redhat_pkgs }}" + edx_service_decrypt_config_enabled: "{{ ECOMMERCE_WORKER_DECRYPT_CONFIG_ENABLED }}" + edx_service_copy_config_enabled: "{{ ECOMMERCE_WORKER_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/ecomworker/tasks/main.yml b/playbooks/roles/ecomworker/tasks/main.yml new file mode 100644 index 00000000000..e68de294d29 --- /dev/null +++ b/playbooks/roles/ecomworker/tasks/main.yml @@ -0,0 +1,111 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# Tasks for role ecommerce_worker. 
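+# +# Overview: this role installs the worker's Python requirements into its +# virtualenv, renders the supervisor wrapper and supervisord config, and +# restarts the service through supervisorctl.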
+# +- name: install application requirements + pip: + requirements: '{{ ecommerce_worker_requirements_base }}/{{ item }}' + virtualenv: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}' + state: present + become_user: '{{ ecommerce_worker_user }}' + with_items: "{{ ecommerce_worker_requirements }}" + tags: + - install + - install:app-requirements + +- name: "Install Datadog APM requirements" + when: COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP + pip: + name: + - ddtrace + extra_args: "--exists-action w" + virtualenv: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}' + state: present + become_user: "{{ ecommerce_worker_user }}" + tags: + - install + - install:app-requirements + +- name: write out the supervisor wrapper + template: + src: edx/app/ecomworker/ecomworker.sh.j2 + dest: '{{ ecommerce_worker_home }}/{{ ecommerce_worker_service_name }}.sh' + mode: 0650 + owner: '{{ supervisor_user }}' + group: '{{ common_web_user }}' + tags: + - install + - install:configuration + +- name: write supervisord config + template: + src: edx/app/supervisor/conf.d.available/ecomworker.conf.j2 + dest: '{{ supervisor_available_dir }}/{{ ecommerce_worker_service_name }}.conf' + owner: '{{ supervisor_user }}' + group: '{{ common_web_user }}' + mode: 0644 + tags: + - install + - install:configuration + +- name: setup the ecommerce_worker env file + template: + src: './{{ ecommerce_worker_home }}/ecomworker_env.j2' + dest: '{{ ecommerce_worker_home }}/ecomworker_env' + owner: '{{ ecommerce_worker_user }}' + group: '{{ ecommerce_worker_user }}' + mode: 0644 + tags: + - install + - install:configuration + +- name: enable supervisor script + file: + src: '{{ supervisor_available_dir }}/{{ ecommerce_worker_service_name }}.conf' + dest: '{{ supervisor_cfg_dir }}/{{ ecommerce_worker_service_name }}.conf' + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: update supervisor configuration + shell: '{{ supervisor_ctl }} -c {{ supervisor_cfg }} update' + when: not disable_edx_services + tags: + - manage + - manage:start + + +- name: create symlinks from the venv bin dir + file: + src: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}/bin/{{ item }}' + dest: '{{ COMMON_BIN_DIR }}/{{ item }}.ecommerce_worker' + state: link + with_items: + - python + - pip + tags: + - install + - install:app-requirements + +- name: restart the application + supervisorctl: + state: restarted + supervisorctl_path: '{{ supervisor_ctl }}' + config: '{{ supervisor_cfg }}' + name: '{{ ecommerce_worker_service_name }}' + when: not disable_edx_services + become_user: '{{ supervisor_service_user }}' + tags: + - manage + - manage:start diff --git a/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 b/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 new file mode 100644 index 00000000000..82e5bf062f4 --- /dev/null +++ b/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set ecommerce_worker_venv_bin = ecommerce_worker_home + '/venvs/' + ecommerce_worker_service_name + '/bin' %} + +{% set executable = ecommerce_worker_venv_bin + '/celery' %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = ecommerce_worker_venv_bin + '/newrelic-admin run-program ' + ecommerce_worker_venv_bin + '/celery' %} + +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED='{{
ECOMMERCE_WORKER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' +export NEW_RELIC_APP_NAME='{{ ECOMMERCE_WORKER_NEWRELIC_APPNAME }}' +export NEW_RELIC_LICENSE_KEY='{{ NEWRELIC_LICENSE_KEY }}' +{% endif -%} + +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = ecommerce_worker_venv_bin + '/ddtrace-run ' + executable %} +export DD_TAGS="service:{{ ecommerce_worker_service_name }}" +export DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true +# Copied from edx_django_service playbook for consistency; Datadog +# trace debug logging issue doesn't actually affect edxapp for some +# reason. +export DD_TRACE_LOG_STREAM_HANDLER=false +{% endif -%} + +source {{ ecommerce_worker_home }}/{{ ecommerce_worker_service_name }}_env +# We exec so that celery is the child of supervisor and can be managed properly +exec {{ executable }} -A ecommerce_worker.celery_app:app worker --concurrency={{ ECOMMERCE_WORKER_CONCURRENCY }} --loglevel=info --hostname=ecomworker.%%h --queue=ecommerce.fulfillment,ecommerce.email_marketing,ecommerce.default diff --git a/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker_env.j2 b/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker_env.j2 new file mode 100644 index 00000000000..3265a143baa --- /dev/null +++ b/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in ecommerce_worker_environment.items() -%} {%- if value -%} +export {{ name }}="{{ value }}" +{% endif %} {%- endfor %} diff --git a/playbooks/roles/ecomworker/templates/edx/app/supervisor/conf.d.available/ecomworker.conf.j2 b/playbooks/roles/ecomworker/templates/edx/app/supervisor/conf.d.available/ecomworker.conf.j2 new file mode 100644 index 00000000000..68861353a64 --- /dev/null +++ b/playbooks/roles/ecomworker/templates/edx/app/supervisor/conf.d.available/ecomworker.conf.j2 @@ -0,0 +1,12 @@ +# +# {{ ansible_managed }} +# +[program:{{ ecommerce_worker_service_name }}] + +command={{ ecommerce_worker_home }}/{{ ecommerce_worker_service_name }}.sh +user={{ common_web_user }} +directory={{ ecommerce_worker_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/edx-sandbox/tasks/main.yml b/playbooks/roles/edx-sandbox/tasks/main.yml new file mode 100644 index 00000000000..8e13f70e070 --- /dev/null +++ b/playbooks/roles/edx-sandbox/tasks/main.yml @@ -0,0 +1,31 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx-sandbox +# +# Overview: +# This role will be used to copy the motd to the sandbox in order to +# show the sandbox termination time as the motd +# +# Dependencies: +# This role will only work on an edX sandbox +# +# Example play: +# +# + +- name: update the termination date and time as motd + template: + dest: "/etc/update-motd.d/999-terminate-sandbox" + src: "etc/update-motd.d/terminate_motd.j2" + mode: 0755 + owner: root + group: root diff --git a/playbooks/roles/edx-sandbox/templates/etc/update-motd.d/terminate_motd.j2 b/playbooks/roles/edx-sandbox/templates/etc/update-motd.d/terminate_motd.j2 new file mode 100644 index 00000000000..5bb84f9ccb0 ---
/dev/null +++ b/playbooks/roles/edx-sandbox/templates/etc/update-motd.d/terminate_motd.j2 @@ -0,0 +1,18 @@ +#!/usr/bin/python + +import boto +from boto.utils import get_instance_metadata +from datetime import timedelta +from datetime import datetime + +ec2 = boto.connect_ec2() +instance_id = get_instance_metadata()['instance-id'] +reservations = ec2.get_all_instances(instance_ids=[instance_id]) +instance = reservations[0].instances[0] + +if 'instance_termination_time' in instance.tags: + terminate_time = datetime.strptime(str(instance.tags['instance_termination_time']), "%m-%d-%Y %H:%M:%S") +else: + terminate_time = datetime.strptime(instance.launch_time, "%Y-%m-%dT%H:%M:%S.%fZ") + timedelta(days=7) + +print('This system will be terminated on {:%b %d, %Y}'.format(terminate_time)) diff --git a/playbooks/roles/edx_ansible/defaults/main.yml b/playbooks/roles/edx_ansible/defaults/main.yml index c60226a1760..467953700b3 100644 --- a/playbooks/roles/edx_ansible/defaults/main.yml +++ b/playbooks/roles/edx_ansible/defaults/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Defaults for role edx_ansible, an edx_ansible role to install edx_ansible @@ -13,16 +13,41 @@ # # OS packages # +# Set this to true to dump all extra vars. +# This is currently broken when extra vars +# contain references to vars that are not +# included in the play.
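+# A hypothetical one-off invocation that enables the dump for a single run: +# ansible-playbook -i localhost, -c local edx_ansible.yml -e EDX_ANSIBLE_DUMP_VARS=true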
+EDX_ANSIBLE_DUMP_VARS: false -edx_ansible_debian_pkgs: - - python-pip +edx_ansible_debian_running_services: + - fail2ban + +edx_ansible_debian_pkgs_default: - python-apt + - libmysqlclient-dev - git-core - build-essential - - python-dev - libxml2-dev - libxslt1-dev - curl + - python-yaml + - python3-pip + - python3-mysqldb + +edx_ansible_release_specific_debian_pkgs: + xenial: + - python-pip + - python-mysqldb + - python-dev + bionic: + - python-pip + - python-mysqldb + - python-dev + focal: + - python3-dev + +edx_ansible_debian_pkgs: "{{ edx_ansible_debian_running_services + edx_ansible_debian_pkgs_default + edx_ansible_release_specific_debian_pkgs[ansible_distribution_release] }}" + edx_ansible_app_dir: "{{ COMMON_APP_DIR }}/edx_ansible" edx_ansible_code_dir: "{{ edx_ansible_app_dir }}/edx_ansible" edx_ansible_data_dir: "{{ COMMON_DATA_DIR }}/edx_ansible" @@ -30,8 +55,12 @@ edx_ansible_venvs_dir: "{{ edx_ansible_app_dir }}/venvs" edx_ansible_venv_dir: "{{ edx_ansible_venvs_dir }}/edx_ansible" edx_ansible_venv_bin: "{{ edx_ansible_venv_dir }}/bin" edx_ansible_user: "edx-ansible" -edx_ansible_source_repo: https://github.com/edx/configuration.git +edx_ansible_source_repo: https://github.com/openedx/configuration.git edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt" -edx_ansible_var_file: "{{ edx_ansible_data_dir }}/server-vars.yml" +edx_ansible_requirements_files: + - "{{ edx_ansible_code_dir }}/requirements/pip.txt" + - "{{ edx_ansible_code_dir }}/requirements.txt" + # edX configuration repo -configuration_version: master +CONFIGURATION_VERSION: master +edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml" diff --git a/playbooks/roles/edx_ansible/meta/main.yml b/playbooks/roles/edx_ansible/meta/main.yml index 13b4a1dbb25..5fd7311d2a1 100644 --- a/playbooks/roles/edx_ansible/meta/main.yml +++ b/playbooks/roles/edx_ansible/meta/main.yml @@ -2,12 +2,13 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Role includes for role edx_ansible + dependencies: - - common + - common_vars diff --git a/playbooks/roles/edx_ansible/tasks/deploy.yml b/playbooks/roles/edx_ansible/tasks/deploy.yml index 99f071041a5..6875424651a 100644 --- a/playbooks/roles/edx_ansible/tasks/deploy.yml +++ b/playbooks/roles/edx_ansible/tasks/deploy.yml @@ -1,31 +1,88 @@ --- -- name: git checkout edx_ansible repo into edx_ansible_code_dir - git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }} - sudo_user: "{{ edx_ansible_user }}" +- name: Git checkout edx_ansible repo into edx_ansible_code_dir + git: + dest: "{{ edx_ansible_code_dir }}" + repo: "{{ edx_ansible_source_repo }}" + version: "{{ CONFIGURATION_VERSION }}" + accept_hostkey: yes + become_user: "{{ edx_ansible_user }}" + tags: + - install + - install:code -- name : install edx_ansible venv requirements - pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present - sudo_user: "{{ edx_ansible_user }}" +- name: Install 
edx_ansible venv requirements + pip: + requirements: "{{ item }}" + virtualenv: "{{ edx_ansible_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + become_user: "{{ edx_ansible_user }}" + with_items: "{{ edx_ansible_requirements_files }}" + tags: + - install + - install:app-requirements -- name: create update script - template: > - dest={{ edx_ansible_app_dir}}/update - src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755 +- name: Create update script + template: + dest: "{{ edx_ansible_app_dir }}/update" + src: "update.j2" + owner: "{{ edx_ansible_user }}" + group: "{{ edx_ansible_user }}" + mode: "0755" + when: devstack is not defined or not devstack + tags: + - install + - install:configuration -- name: create a symlink for update.sh - file: > - src={{ edx_ansible_app_dir }}/update - dest={{ COMMON_BIN_DIR }}/update - state=link +- name: Create symlinks for update script + file: + src: "{{ edx_ansible_app_dir }}/update" + dest: "{{ COMMON_BIN_DIR }}/update" + state: link + when: devstack is not defined or not devstack + tags: + - install + - install:configuration -- name: create a symlink for var file - file: > - src={{ edx_ansible_var_file }} - dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }} - state=link +- name: Create utility scripts + template: + dest: "{{ edx_ansible_app_dir }}/{{ item.dest }}" + src: "{{ item.src }}" + owner: "{{ edx_ansible_user }}" + group: "{{ edx_ansible_user }}" + mode: "0755" + with_items: + - { src: 'show-repo-heads.j2', dest: 'show-repo-heads' } + - { src: 'pre-box.j2', dest: 'pre-box' } + tags: + - install + - install:configuration -- name: create a symlink for ansible-playbook - file: > - src={{ edx_ansible_venv_bin }}/ansible-playbook - dest={{ COMMON_BIN_DIR }}/ansible-playbook - state=link +- name: Create symlinks for utility scripts + file: + src: "{{ edx_ansible_app_dir }}/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item }}" + state: link + with_items: + - show-repo-heads + tags: + - install + - install:configuration + +- name: Create a symlink for ansible-playbook + file: + src: "{{ edx_ansible_venv_bin }}/ansible-playbook" + dest: "{{ COMMON_BIN_DIR }}/ansible-playbook" + state: link + tags: + - install + - install:configuration + +- name: Create a symlink for the playbooks dir + file: + src: "{{ edx_ansible_code_dir }}/playbooks" + dest: "{{ COMMON_CFG_DIR }}/playbooks" + state: link + tags: + - install + - install:configuration diff --git a/playbooks/roles/edx_ansible/tasks/main.yml b/playbooks/roles/edx_ansible/tasks/main.yml index 3aaa4e63009..6755493c372 100644 --- a/playbooks/roles/edx_ansible/tasks/main.yml +++ b/playbooks/roles/edx_ansible/tasks/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # Tasks for role edx_ansible @@ -23,25 +23,45 @@ # # # -- name: create application user - user: > - name="{{ edx_ansible_user }}" - home="{{ edx_ansible_app_dir }}" - createhome=no - shell=/bin/false +- name: Create
application user + user: + name: "{{ edx_ansible_user }}" + home: "{{ edx_ansible_app_dir }}" + createhome: no + shell: /bin/false + tags: + - install:base -- name: create edx_ansible app and venv dir - file: > - path="{{ item }}" - state=directory - owner="{{ edx_ansible_user }}" - group="{{ common_web_group }}" +- name: Create edx_ansible app and venv dir + file: + path: "{{ item }}" + state: directory + owner: "{{ edx_ansible_user }}" + group: "{{ common_web_group }}" with_items: - "{{ edx_ansible_app_dir }}" - "{{ edx_ansible_data_dir }}" - "{{ edx_ansible_venvs_dir }}" + tags: + - install:base -- name: install a bunch of system packages on which edx_ansible relies - apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present +- name: Install a bunch of system packages on which edx_ansible relies + apt: + name: "{{ edx_ansible_debian_pkgs }}" + state: present + update_cache: true + tags: + - install:system-requirements -- include: deploy.yml tags=deploy +- include: deploy.yml + tags: + - deploy + +- name: Start and enable running services + ansible.builtin.systemd: + state: started + enabled: true + name: "{{ item }}" + with_items: "{{ edx_ansible_debian_running_services }}" + tags: + - install:system-requirements diff --git a/playbooks/roles/aws/templates/dumpall.yml.j2 b/playbooks/roles/edx_ansible/templates/dumpall.yml.j2 similarity index 100% rename from playbooks/roles/aws/templates/dumpall.yml.j2 rename to playbooks/roles/edx_ansible/templates/dumpall.yml.j2 diff --git a/playbooks/roles/edx_ansible/templates/pre-box.j2 b/playbooks/roles/edx_ansible/templates/pre-box.j2 new file mode 100644 index 00000000000..716b2163776 --- /dev/null +++ b/playbooks/roles/edx_ansible/templates/pre-box.j2 @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# +# Remove needless noise from file systems before packaging a Vagrant box file. +# This can reduce the size of the box file by 25%. +# Run this with sudo in the guest OS just before "vagrant package" in the host. + +set -x + +# Discard packages we don't need. +apt-get clean -y +apt-get autoclean -y + +# Clean out pip caches. +find / -type d -path '*/.cache/pip' -print -exec rm -rf '{}' \; + +# We used to remove all .pyc files. This caused problems in sandboxes, +# where code couldn't write .pyc files, and everything took too long. +# find /edx/app -name '*.pyc' -delete + +# Last thing: fill the disk with zeros so they'll compress well. +# The dd command fails because the disk fills, which is the point, so silence +# the error. +dd if=/dev/zero of=/empty bs=1M || true +rm -f /empty diff --git a/playbooks/roles/edx_ansible/templates/show-repo-heads.j2 b/playbooks/roles/edx_ansible/templates/show-repo-heads.j2 new file mode 100644 index 00000000000..28a3d18d0c4 --- /dev/null +++ b/playbooks/roles/edx_ansible/templates/show-repo-heads.j2 @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# Display the head commit for all our git repos + +echo "With status..." + +# Look in .git directories. Only two levels down to avoid virtualenvs. 
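+# Illustrative output for one repo (values are made up; the log format below +# prints committer date, abbreviated hash, committer name, subject and refs, +# followed by any dirty files from "git status --short"): +# ---- /edx/app/edxapp/edx-platform ---- +# 2023-05-01 12:00:00 +0000 (abc1234) Jane Doe: Fix the widget (HEAD -> master) +# M lms/envs/common.py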
+ +for d in {{ COMMON_APP_DIR }}/*/*/.git; do + d=$(dirname $d) + echo "---- $d ----" + git -C $d log -1 --format='%ci (%h) %cn: %s %d' + git -C $d status --short +done diff --git a/playbooks/roles/edx_ansible/templates/update.j2 b/playbooks/roles/edx_ansible/templates/update.j2 index b315f975b11..dbca95613bd 100644 --- a/playbooks/roles/edx_ansible/templates/update.j2 +++ b/playbooks/roles/edx_ansible/templates/update.j2 @@ -10,10 +10,17 @@ IFS="," Usage: $PROG -v add verbosity to edx_ansible run + -e add extra_vars to ansible plays -h this - - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, discern, edx-ora, configuration + - must be one of edx-platform, edx-workers, xqueue, cs_comments_service, credentials, configuration, + read-only-certificate-code, edx-analytics-data-api, edx-ora2, insights, ecommerce, discovery, + video_web_frontend, video_delivery_worker, veda_pipeline_worker, video_encode_worker, veda_ffmpeg, + registrar, program_console, learner_portal, prospectus, authn, payment, learning, ora_grading, enterprise_catalog - can be a commit or tag + - specify extra_vars to any of the ansible plays with the -e switch, encapsulating your vars in "double quotes" + example: update -e "-e 'hallo=bye' -e 'bye=hallo'" + you can also specify a file as -e "-e '@/path/to/file.yml'" EO IFS=$SAVE_IFS @@ -38,18 +45,48 @@ if [[ -f {{ edx_ansible_var_file }} ]]; then extra_args="-e@{{ edx_ansible_var_file }}" fi -declare -A repos_to_cmd -edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy $extra_args " +{% if devstack %} +extra_args="$extra_args -e 'disable_edx_services=true' -e devstack=true" +{% endif %} + +if [[ "$3" == "-e" ]]; then + opt=$4 + sed -e 's/^"//' -e 's/"$//' <<<"$opt" + extra_args="$extra_args $opt" +fi -repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'" -repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'" -repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'" -repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'" -repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'" -repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'" -repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'" -repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'" +declare -A repos_to_cmd +edx_ansible_cmd="{{ edx_ansible_venv_bin }}/ansible-playbook -i localhost, -c local $extra_args" +repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'EDX_PLATFORM_VERSION=$2'" +repos_to_cmd["edx-workers"]="$edx_ansible_cmd edxapp.yml -e 'EDX_PLATFORM_VERSION=$2' -e 'celery_worker=true'" +repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'XQUEUE_VERSION=$2' -e 'elb_pre_post=false'" +repos_to_cmd["credentials"]="$edx_ansible_cmd credentials.yml -e 'credentials_version=$2'" +repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'FORUM_VERSION=$2'" +repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'CONFIGURATION_VERSION=$2'" +repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'" +repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'" +repos_to_cmd["insights"]="$edx_ansible_cmd insights.yml -e 'INSIGHTS_VERSION=$2'" +repos_to_cmd["ecommerce"]="$edx_ansible_cmd
ecommerce.yml -e 'ECOMMERCE_VERSION=$2'" +repos_to_cmd["discovery"]="$edx_ansible_cmd discovery.yml -e 'DISCOVERY_VERSION=$2'" +repos_to_cmd["video_web_frontend"]="$edx_ansible_cmd veda_web_frontend.yml -e 'VEDA_WEB_FRONTEND_VERSION=$2'" +repos_to_cmd["video_delivery_worker"]="$edx_ansible_cmd veda_delivery_worker.yml -e 'VEDA_DELIVERY_WORKER_VERSION=$2'" +repos_to_cmd["veda_pipeline_worker"]="$edx_ansible_cmd veda_pipeline_worker.yml -e 'VEDA_PIPELINE_WORKER_VERSION=$2'" +repos_to_cmd["video_encode_worker"]="$edx_ansible_cmd veda_encode_worker.yml -e 'VEDA_ENCODE_WORKER_VERSION=$2'" +repos_to_cmd["veda_ffmpeg"]="$edx_ansible_cmd veda_ffmpeg.yml -e 'VEDA_FFMPEG_VERSION=$2'" +repos_to_cmd["registrar"]="$edx_ansible_cmd registrar.yml -e 'REGISTRAR_VERSION=$2'" +repos_to_cmd["learner_portal"]="$edx_ansible_cmd learner_portal.yml -e 'LEARNER_PORTAL_VERSION=$2'" +repos_to_cmd["program_console"]="$edx_ansible_cmd program_console.yml -e 'PROGRAM_CONSOLE_VERSION=$2'" +repos_to_cmd["prospectus"]="$edx_ansible_cmd prospectus.yml -e 'PROSPECTUS_VERSION=$2'" +repos_to_cmd["authn"]="$edx_ansible_cmd authn_frontend.yml -e 'AUTHN_MFE_VERSION=$2'" +repos_to_cmd["payment"]="$edx_ansible_cmd payment.yml -e 'PAYMENT_MFE_VERSION=$2'" +repos_to_cmd["learning"]="$edx_ansible_cmd learning.yml -e 'LEARNING_MFE_VERSION=$2'" +repos_to_cmd["course_authoring"]="$edx_ansible_cmd course_authoring.yml -e 'COURSE_AUTHORING_MFE_VERSION=$2'" +repos_to_cmd["library_authoring"]="$edx_ansible_cmd library_authoring.yml -e 'LIBRARY_AUTHORING_MFE_VERSION=$2'" +repos_to_cmd["ora_grading"]="$edx_ansible_cmd ora_grading.yml -e 'ORA_GRADING_MFE_VERSION=$2'" +repos_to_cmd["enterprise_catalog"]="$edx_ansible_cmd enterprise_catalog.yml -e 'ENTERPRISE_CATALOG_MFE_VERSION=$2'" +repos_to_cmd["profile"]="$edx_ansible_cmd profile.yml -e 'PROFILE_MFE_VERSION=$2'" +repos_to_cmd["learner_dashboard"]="$edx_ansible_cmd learner_dashboard.yml -e 'LEARNER_DASHBOARD_MFE_VERSION=$2'" if [[ -z $1 || -z $2 ]]; then echo @@ -65,5 +102,5 @@ if [[ -z ${repos_to_cmd[$1]} ]]; then exit 1 fi -cd {{ edx_ansible_code_dir }}/playbooks/edx-east +cd {{ edx_ansible_code_dir }}/playbooks eval "sudo ${repos_to_cmd["$1"]} $verbose" diff --git a/playbooks/roles/edx_django_service/defaults/main.yml b/playbooks/roles/edx_django_service/defaults/main.yml new file mode 100644 index 00000000000..19eb3a973b5 --- /dev/null +++ b/playbooks/roles/edx_django_service/defaults/main.yml @@ -0,0 +1,272 @@ +--- +edx_django_service_name: ' NOT-SET ' +edx_django_service_repo: '{{ edx_django_service_name }}' +edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ edx_django_service_name }}' +edx_django_service_user: '{{ edx_django_service_name }}' +edx_django_service_use_python3: true +edx_django_service_use_python38: false + +# This should be overwritten at the time Ansible is run.
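+# For example, a hypothetical devstack run might pass: +# ansible-playbook <service>.yml -e edx_django_service_is_devstack=true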
+edx_django_service_is_devstack: false + +edx_django_service_has_static_assets: true + +edx_django_service_wsgi_name: '{{ edx_django_service_name }}' + +edx_django_service_name_devstack_logs: + - '{{ supervisor_log_dir }}/{{ edx_django_service_name }}-stdout.log' + - '{{ supervisor_log_dir }}/{{ edx_django_service_name }}-stderr.log' + +edx_django_service_git_protocol: '{{ COMMON_GIT_PROTOCOL }}' +edx_django_service_git_domain: '{{ COMMON_GIT_MIRROR }}' +edx_django_service_git_path: '{{ COMMON_GIT_PATH }}' +edx_django_service_version: 'master' +edx_django_service_git_identity: null +edx_django_service_django_settings_module: null + +edx_django_service_app_config_file: "{{ COMMON_CFG_DIR }}/{{ edx_django_service_name }}.yml" + +edx_django_service_code_dir: '{{ edx_django_service_home }}/{{ edx_django_service_name }}' +edx_django_service_venv_dir: '{{ edx_django_service_home }}/venvs/{{ edx_django_service_name }}' +edx_django_service_venv_bin_dir: '{{ edx_django_service_venv_dir }}/bin' + +edx_django_service_nodeenv_dir: '{{ edx_django_service_home }}/nodeenvs/{{ edx_django_service_name }}' +edx_django_service_nodeenv_bin: '{{ edx_django_service_nodeenv_dir }}/bin' +edx_django_service_node_modules_dir: '{{ edx_django_service_code_dir }}/node_modules' +edx_django_service_node_bin: '{{ edx_django_service_node_modules_dir }}/.bin' +edx_django_service_node_version: '12.13.0' +edx_django_service_npm_version: '6.12.1' + +edx_django_service_environment_default: + DJANGO_SETTINGS_MODULE: '{{ edx_django_service_django_settings_module }}' + PATH: '{{ edx_django_service_nodeenv_bin }}:{{ edx_django_service_venv_dir }}/bin:{{ ansible_env.PATH }}' +edx_django_service_environment_extra: {} +edx_django_service_environment: '{{ edx_django_service_environment_default | combine(edx_django_service_environment_extra) }}' + +edx_django_service_migration_environment_default: + DB_MIGRATION_USER: '{{ COMMON_MYSQL_MIGRATE_USER }}' + DB_MIGRATION_PASS: '{{ COMMON_MYSQL_MIGRATE_PASS }}' +edx_django_service_migration_environment: '{{ edx_django_service_environment|combine(edx_django_service_migration_environment_default) }}' + +edx_django_service_debian_pkgs_default: + - gettext + - libffi-dev # Needed to install the Python cryptography library for asymmetric JWT signing + - libmemcached-dev + - libmysqlclient-dev + - libssl-dev + - pkg-config +edx_django_service_debian_pkgs_extra: [] +edx_django_service_debian_pkgs: '{{ edx_django_service_debian_pkgs_default + edx_django_service_debian_pkgs_extra }}' + +# List of additional python packages that should be installed into the +# service virtual environment. +# `name` (required), `version` (optional), and `extra_args` (optional) +# are supported and correspond to the options of ansible's pip module. 
+# Example: +# edx_django_service_extra_requirements: +# - name: mypackage +# version: 1.0.1 +# - name: git+https://git.myproject.org/MyProject#egg=MyProject +edx_django_service_extra_requirements: [] + +edx_django_service_gunicorn_extra: '' +edx_django_service_gunicorn_extra_conf: '' +edx_django_service_gunicorn_host: '127.0.0.1' +edx_django_service_gunicorn_port: null +edx_django_service_gunicorn_timeout: 300 +edx_django_service_gunicorn_workers: 2 +edx_django_service_gunicorn_worker_class: 'gevent' +edx_django_service_gunicorn_max_requests: null +edx_django_service_gunicorn_limit_request_field_size: 16384 + +edx_django_service_cors_whitelist: [] +edx_django_service_allow_cors_headers: false +edx_django_service_max_webserver_upload: !!null +edx_django_service_allow_cors_credentials: false + +nginx_edx_django_service_gunicorn_hosts: + - 127.0.0.1 + +edx_django_service_hostname: '~^((stage|prod)-)?{{ edx_django_service_name }}.*' +edx_django_service_nginx_port: '1{{ edx_django_service_gunicorn_port }}' +edx_django_service_nginx_read_timeout: !!null + +EDX_DJANGO_SERVICE_ENABLE_S3_MAINTENANCE: false +EDX_DJANGO_SERVICE_S3_MAINTENANCE_FILE: "/edx-static/maintenance/maintenance.html" + +edx_django_service_ssl_nginx_port: '4{{ edx_django_service_gunicorn_port }}' +edx_django_service_enable_basic_auth: false +edx_django_service_basic_auth_exempted_paths_default: + - api +edx_django_service_basic_auth_exempted_paths_extra: [] +edx_django_service_basic_auth_exempted_paths: '{{ edx_django_service_basic_auth_exempted_paths_default + edx_django_service_basic_auth_exempted_paths_extra }}' + +edx_django_service_newrelic_appname: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}' +edx_django_service_enable_newrelic_distributed_tracing: false + +edx_django_service_repos: + - PROTOCOL: '{{ edx_django_service_git_protocol }}' + DOMAIN: '{{ edx_django_service_git_domain }}' + PATH: '{{ edx_django_service_git_path }}' + REPO: '{{ edx_django_service_repo }}.git' + VERSION: '{{ edx_django_service_version }}' + DESTINATION: '{{ edx_django_service_code_dir }}' + SSH_KEY: '{{ edx_django_service_git_identity }}' + +edx_django_service_secret_key: null +edx_django_service_language_code: 'en-us' + +edx_django_service_data_dir: '{{ COMMON_DATA_DIR }}/{{ edx_django_service_name }}' +edx_django_service_media_root: '{{ edx_django_service_data_dir }}/media' +edx_django_service_media_url: '/media/' + +edx_django_service_static_root: '{{ COMMON_DATA_DIR }}/{{ edx_django_service_name }}/staticfiles' +edx_django_service_staticfiles_storage: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +edx_django_service_media_storage_backend: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ edx_django_service_media_root }}' + MEDIA_URL: '{{ edx_django_service_media_url }}' + +edx_django_service_memcache: [ 'memcache' ] + +edx_django_service_caches: + default: + BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' + KEY_PREFIX: '{{ edx_django_service_name }}' + LOCATION: '{{ edx_django_service_memcache }}' + +edx_django_service_default_db_host: 'localhost' +edx_django_service_default_db_name: '{{ edx_django_service_name }}' +edx_django_service_default_db_atomic_requests: false +edx_django_service_default_db_conn_max_age: 60 +edx_django_service_db_user: 'REPLACE-ME' +edx_django_service_db_password: 'password' +edx_django_service_db_options: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + +edx_django_service_databases: + 
default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ edx_django_service_default_db_name }}' + USER: '{{ edx_django_service_db_user }}' + PASSWORD: '{{ edx_django_service_db_password }}' + HOST: '{{ edx_django_service_default_db_host }}' + PORT: '3306' + ATOMIC_REQUESTS: '{{ edx_django_service_default_db_atomic_requests }}' + CONN_MAX_AGE: '{{ edx_django_service_default_db_conn_max_age }}' + OPTIONS: '{{ edx_django_service_db_options }}' + +edx_django_service_social_auth_edx_oauth2_key: '{{ edx_django_service_name }}-sso-key' +edx_django_service_social_auth_edx_oauth2_secret: '{{ edx_django_service_name }}-sso-secret' +edx_django_service_backend_service_edx_oauth2_key: '{{ edx_django_service_name }}-backend-service-key' +edx_django_service_backend_service_edx_oauth2_secret: '{{ edx_django_service_name }}-backend-service-secret' +edx_django_service_social_auth_redirect_is_https: false + +edx_django_service_oauth2_url_root: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_oauth2_issuer: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_oauth2_logout_url: '{{ COMMON_OAUTH_LOGOUT_URL }}' +edx_django_service_oauth2_provider_url: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' + +edx_django_service_jwt_audience: '{{ COMMON_JWT_AUDIENCE }}' +edx_django_service_jwt_issuer: '{{ COMMON_JWT_ISSUER }}' +edx_django_service_jwt_secret_key: '{{ COMMON_JWT_SECRET_KEY }}' + +edx_django_service_session_expire_at_browser_close: false + +edx_django_service_jwt_auth: + JWT_ISSUERS: + - AUDIENCE: '{{ edx_django_service_jwt_audience }}' + ISSUER: '{{ edx_django_service_jwt_issuer }}' + SECRET_KEY: '{{ edx_django_service_jwt_secret_key }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + +edx_django_service_extra_apps: [] + +edx_django_service_api_root: !!null + +edx_django_service_config_default: + LANGUAGE_CODE: '{{ edx_django_service_language_code }}' + SECRET_KEY: '{{ edx_django_service_secret_key }}' + TIME_ZONE: 'UTC' + + STATIC_ROOT: '{{ edx_django_service_static_root }}' + MEDIA_STORAGE_BACKEND: '{{ edx_django_service_media_storage_backend }}' + STATICFILES_STORAGE: '{{ edx_django_service_staticfiles_storage }}' + + CACHES: '{{ edx_django_service_caches }}' + DATABASES: '{{ edx_django_service_databases }}' + + SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ edx_django_service_social_auth_redirect_is_https }}' + + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ edx_django_service_social_auth_edx_oauth2_key }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ edx_django_service_social_auth_edx_oauth2_secret }}' + SOCIAL_AUTH_EDX_OAUTH2_ISSUER: '{{ edx_django_service_oauth2_issuer }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ edx_django_service_oauth2_url_root }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ edx_django_service_oauth2_logout_url }}' + + BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ edx_django_service_backend_service_edx_oauth2_key }}' + BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ edx_django_service_backend_service_edx_oauth2_secret }}' + BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: '{{ edx_django_service_oauth2_provider_url }}' + + JWT_AUTH: '{{ edx_django_service_jwt_auth }}' + + EXTRA_APPS: '{{ edx_django_service_extra_apps }}' + + EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: '{{ edx_django_service_oauth2_url_root }}/user_info' + + SESSION_EXPIRE_AT_BROWSER_CLOSE: '{{ edx_django_service_session_expire_at_browser_close }}' + + API_ROOT: '{{ edx_django_service_api_root }}' + +# NOTE: This 
should be overridden by the inheriting service-specific role. +edx_django_service_config_overrides: {} +edx_django_service_config: '{{ edx_django_service_config_default|combine(edx_django_service_config_overrides) }}' + +edx_django_service_automated_users: {} + +# This array contains commands that should be run after migration. +# +# The commands will be executed from the code directory with the application's virtualenv activated. The migration +# environment (e.g. migration DB username/password) will NOT be used, so commands should not rely on these values being +# set. In other words, don't try to sneak in another run of the migrate management command. +# +# Example: +# edx_django_service_post_migrate_commands: +# - command: './manage.py conditional_command' +# when: '{{ foo }}' +# - command: './manage.py always_command' +# when: True +# +# In this example, the "conditional_command" will only be run when the variable `foo` is set to `True`. The +# "always_command" will always be run because its conditional is set to `True`. To minimize surprises, the `when` +# key *MUST* be supplied for all commands. +# +edx_django_service_post_migrate_commands: [] + +# This is a comma-separated list of services which will trigger checking migrations if they are found in the +# ec2 "services" tag. For most services this just needs to be the edx_django_service_name. In some cases it needs to be +# overridden, such as in the case of having workers. For example, for edxapp it needs to be "lms,cms,workers" and for +# enterprise_catalog it's "enterprise_catalog,enterprise_catalog_worker". +edx_django_service_migration_check_services: "{{ edx_django_service_name }}" + +EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: false + +EDX_DJANGO_SERVICE_ADMIN_URLS: [] + +edx_django_service_enable_celery_workers: false +edx_django_service_workers: [] +edx_django_service_celery_heartbeat_enabled: true +edx_django_service_lang: 'en_US.UTF-8' +edx_django_service_default_stopwaitsecs: 432000 +edx_django_service_workers_supervisor_conf: '{{ edx_django_service_name }}-workers.conf' + +edx_django_service_enable_experimental_docker_shim: false +# Default to blank if edx_django_service_enable_experimental_docker_shim is false +edx_django_service_docker_run_command_make_migrate: "" +edx_django_service_docker_run_command_make_static: "" +edx_django_service_docker_image_name: 'openedx/{{ edx_django_service_name }}' diff --git a/playbooks/roles/edx_django_service/meta/main.yml b/playbooks/roles/edx_django_service/meta/main.yml new file mode 100644 index 00000000000..52148cff43c --- /dev/null +++ b/playbooks/roles/edx_django_service/meta/main.yml @@ -0,0 +1,30 @@ +--- +dependencies: + - common + - role: docker-tools + when: edx_django_service_enable_experimental_docker_shim + docker_users: + - "www-data" + - "{{ supervisor_user }}" + - "{{ edx_django_service_user }}" + - role: supervisor + supervisor_spec: + - service: "{{ edx_django_service_name }}" + migration_check_services: "{{ edx_django_service_migration_check_services }}" + python: "{{ edx_django_service_venv_bin_dir }}/python" + code: "{{ edx_django_service_code_dir }}" + env: "{{ edx_django_service_home }}/{{ edx_django_service_name }}_env" + - role: automated + AUTOMATED_USERS: "{{ edx_django_service_automated_users }}" + ENABLE_DOCKER_ACCESS_FOR_AUTOMATED_USERS: "{{ edx_django_service_enable_experimental_docker_shim }}" + - role: edx_service + edx_service_name: "{{ edx_django_service_name }}" + edx_service_config: "{{ edx_django_service_config }}" + edx_service_repos:
"{{ edx_django_service_repos }}" + edx_service_user: "{{ edx_django_service_user }}" + edx_service_home: "{{ edx_django_service_home }}" + edx_service_packages: + debian: "{{ edx_django_service_debian_pkgs }}" + redhat: [] + edx_service_decrypt_config_enabled: "{{ edx_django_service_decrypt_config_enabled }}" + edx_service_copy_config_enabled: "{{ edx_django_service_copy_config_enabled }}" diff --git a/playbooks/roles/edx_django_service/tasks/main.yml b/playbooks/roles/edx_django_service/tasks/main.yml new file mode 100644 index 00000000000..14004b11dcc --- /dev/null +++ b/playbooks/roles/edx_django_service/tasks/main.yml @@ -0,0 +1,479 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_django_service +# +# Overview: +# For devstack set edx_django_service_is_devstack to true. +# +# Dependencies: +# +# +# Example play: +# +# + +- name: Get service user numeric id for docker + command: "id -u {{ edx_django_service_user }}" + register: edx_django_service_user_id + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Docker build non-newrelic + when: edx_django_service_enable_experimental_docker_shim + command: docker build {{ edx_django_service_code_dir }} --target app -t {{ edx_django_service_docker_image_name }}:latest + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:app-requirements + +- name: Docker build newrelic + when: edx_django_service_enable_experimental_docker_shim and COMMON_ENABLE_NEWRELIC + command: docker build {{ edx_django_service_code_dir }} --target newrelic -t {{ edx_django_service_docker_image_name }}:latest-newrelic + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:app-requirements + +- name: add gunicorn configuration file + template: + src: "edx/app/app/app_gunicorn.py.j2" + dest: "{{ edx_django_service_home }}/{{ edx_django_service_name }}_gunicorn.py" + become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:configuration + +- name: install python3.8 + apt: + pkg: + - python3.8-dev + - python3.8-distutils + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: edx_django_service_use_python38 and not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: install python3 + apt: + name: "{{ item }}" + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: edx_django_service_use_python3 and not edx_django_service_enable_experimental_docker_shim + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3.8 + command: "virtualenv --python=python3.8 {{ edx_django_service_venv_dir }}" + args: + creates: "{{ edx_django_service_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_user }}" + when: 
edx_django_service_use_python38 and not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=python3 {{ edx_django_service_venv_dir }}" + args: + creates: "{{ edx_django_service_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_user }}" + when: edx_django_service_use_python3 and not edx_django_service_use_python38 and not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: build virtualenv with python2.7 + command: "virtualenv --python=python2.7 {{ edx_django_service_venv_dir }}" + args: + creates: "{{ edx_django_service_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_use_python3 and not edx_django_service_use_python38 and not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Pin pip to a specific version. + command: "{{ edx_django_service_venv_dir }}/bin/pip install pip=={{ COMMON_PIP_VERSION }}" + become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +# NOTE (CCB): Ideally we should use the pip Ansible command, +# but that doesn't seem to work with the Python 3.x virtualenv. +- name: install nodeenv + command: pip install nodeenv + become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:system-requirements + +- name: create nodeenv + command: "nodeenv {{ edx_django_service_nodeenv_dir }} --node={{ edx_django_service_node_version }} --prebuilt" + args: + creates: "{{ edx_django_service_nodeenv_dir }}" + become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim + register: node_download_result + until: node_download_result is succeeded + retries: 5 + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:system-requirements + +- name: upgrade npm + command: "npm install -g npm@{{ edx_django_service_npm_version }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:system-requirements + when: edx_django_service_npm_version is defined and not edx_django_service_enable_experimental_docker_shim + +- name: install production requirements + command: make production-requirements + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:app-requirements + +- name: install development requirements + command: make requirements + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + when: edx_django_service_is_devstack is defined and edx_django_service_is_devstack and not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:app-requirements + - devstack + - devstack:install + +- name: install extra requirements + pip: + name: "{{ item.name }}" + version: "{{ item.version|default(omit) }}" + extra_args: "--exists-action w {{ item.extra_args|default('') }}" + virtualenv: "{{
edx_django_service_venv_dir }}" + state: present + with_items: "{{ edx_django_service_extra_requirements }}" + become_user: "{{ edx_django_service_user }}" + tags: + - install + - install:app-requirements + +- name: add extra requirements to extra.txt + lineinfile: + path: "{{ edx_django_service_code_dir }}/requirements/extra.txt" + line: "{{ item.name }}" + become_user: "{{ edx_django_service_user }}" + with_items: "{{ edx_django_service_extra_requirements }}" + when: edx_django_service_add_extra_requirements_to_requirements_file is defined and edx_django_service_add_extra_requirements_to_requirements_file + tags: + - install + - install:app-requirements + +- name: "Install Datadog APM requirements" + when: COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP + pip: + name: + - ddtrace + extra_args: "--exists-action w" + virtualenv: "{{ edx_django_service_venv_dir }}" + state: present + become_user: "{{ edx_django_service_user }}" + tags: + - install + - install:app-requirements + +- name: Check for existing make_migrate container + command: "docker ps -aq --filter name='{{ edx_django_service_name }}.make_migrate'" + register: edx_django_service_make_migrate_container + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Delete existing make_migrate container + when: edx_django_service_enable_experimental_docker_shim and edx_django_service_make_migrate_container.stdout != "" + command: "docker rm {{ edx_django_service_make_migrate_container.stdout }}" + +- name: Set edx_django_service_docker_run_command_make_migrate if docker shim enabled + set_fact: + edx_django_service_docker_run_command_make_migrate: "docker run --user root --name {{ edx_django_service_name }}.make_migrate --env DB_MIGRATION_USER --env DB_MIGRATION_PASS --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} {{ edx_django_service_docker_image_name }}:latest" + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: migrate database + command: "{{ edx_django_service_docker_run_command_make_migrate }} make migrate" + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_migration_environment }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: run post-migrate commands + command: "{{ item.command }}" + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + with_items: '{{ edx_django_service_post_migrate_commands }}' + when: migrate_db is defined and migrate_db|lower == "yes" and item.when|bool and not edx_django_service_enable_experimental_docker_shim + run_once: yes + tags: + - migrate + - migrate:db + - migrate:post + +- name: ensure log files exist for tailing + file: + path: "{{ item }}" + state: touch + owner: "{{ common_web_user }}" + group: "{{ common_web_user }}" + with_items: '{{ edx_django_service_name_devstack_logs }}' + tags: + - install + - install:configuration + +- name: write out the supervisor wrapper + template: + src: "edx/app/app/app.sh.j2" + dest: "{{ edx_django_service_home }}/{{ edx_django_service_name }}.sh" + mode: 0650 + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + +-
name: write out the celery workers supervisor wrapper + when: edx_django_service_enable_celery_workers + template: + src: "edx/app/app/app-workers.sh.j2" + dest: "{{ edx_django_service_home }}/{{ edx_django_service_name }}-workers.sh" + mode: 0650 + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + +- name: write supervisord config + template: + src: "edx/app/supervisor/conf.d.available/app.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ edx_django_service_name }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: write celery workers supervisord config + when: edx_django_service_enable_celery_workers + template: + src: "edx/app/supervisor/conf.d.available/app-workers.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ edx_django_service_workers_supervisor_conf }}" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: write devstack script + template: + src: "edx/app/app/devstack.sh.j2" + dest: "{{ edx_django_service_home }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: edx_django_service_is_devstack is defined and edx_django_service_is_devstack and not edx_django_service_enable_experimental_docker_shim + tags: + - devstack + - devstack:install + +- name: setup the app env file + template: + src: "edx/app/app/app_env.j2" + dest: "{{ edx_django_service_home }}/{{ edx_django_service_name }}_env" + owner: "{{ edx_django_service_user }}" + group: "{{ edx_django_service_user }}" + mode: 0644 + when: not edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:configuration + +- name: enable supervisor script + file: + src: "{{ supervisor_available_dir }}/{{ edx_django_service_name }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ edx_django_service_name }}.conf" + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: enable celery worker supervisor script + when: edx_django_service_enable_celery_workers and not disable_edx_services + file: + src: "{{ supervisor_available_dir }}/{{ edx_django_service_workers_supervisor_conf }}" + dest: "{{ supervisor_cfg_dir }}/{{ edx_django_service_workers_supervisor_conf }}" + state: link + force: yes + tags: + - install + - install:configuration + +- name: update supervisor configuration + command: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: create symlinks from the repo dir + file: + src: "{{ edx_django_service_code_dir }}/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ edx_django_service_name }}" + state: link + when: not edx_django_service_enable_experimental_docker_shim + with_items: + - manage.py + tags: + - install + - install:app-requirements + +- name: Check for existing make_static container + command: "docker ps -aq --filter name='{{ edx_django_service_name }}.make_static'" + register: edx_django_service_make_static_container + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Delete existing make_static container + when: edx_django_service_enable_experimental_docker_shim and edx_django_service_make_static_container.stdout != "" + command: "docker rm {{ edx_django_service_make_static_container.stdout 
}}" + +- name: Set edx_django_service_docker_run_command_make_static if docker shim enabled + set_fact: + edx_django_service_docker_run_command_make_static: "docker run --user root --name {{ edx_django_service_name }}.make_static --mount type=bind,src={{ edx_django_service_static_root }},dst={{ edx_django_service_static_root }} --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} {{ edx_django_service_docker_image_name }}:latest" + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: compile static assets + command: "{{ edx_django_service_docker_run_command_make_static }} make static" + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + when: edx_django_service_has_static_assets + tags: + - assets + - assets:gather + +- name: restart the application + supervisorctl: + state: restarted + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + name: "{{ edx_django_service_name }}" + when: not disable_edx_services + become_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:start + +- name: Copying nginx configs for the service + template: + src: "edx/app/nginx/sites-available/app.j2" + dest: "{{ nginx_sites_available_dir }}/{{ edx_django_service_name }}" + owner: root + group: "{{ common_web_user }}" + mode: 0640 + when: nginx_app_dir is defined + notify: reload nginx + tags: + - install + - install:vhosts + +- name: Creating nginx config links for the service + file: + src: "{{ nginx_sites_available_dir }}/{{ edx_django_service_name }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ edx_django_service_name }}" + state: link + owner: root + group: root + when: nginx_app_dir is defined + notify: reload nginx + tags: + - install + - install:vhosts + +#TODO jdmulloy docker build +- name: Include JWT signature setting in the app config file + include_role: + name: jwt_signature + when: CONFIGURE_JWTS + vars: + app_name: '{{ edx_django_service_name }}' + app_config_file: "{{ edx_django_service_app_config_file }}" + app_config_owner: root + app_config_group: root + app_config_mode: 0644 + +- name: Create docker shim flag file + command: "touch /edx/etc/docker_shim_enabled" + when: edx_django_service_enable_experimental_docker_shim diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app-workers.sh.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app-workers.sh.j2 new file mode 100644 index 00000000000..f1dd4d1b37d --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app-workers.sh.j2 @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edx_django_service_venv_bin = edx_django_service_venv_dir + "/bin" %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edx_django_service_venv_bin + '/newrelic-admin run-program ' + edx_django_service_venv_bin + '/celery' %} +{% else %} +{% set executable = edx_django_service_venv_bin + '/celery' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +HOSTNAME=$(hostname) +export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +{% endif %} + +source {{ edx_django_service_home }}/{{ edx_django_service_name }}_env + +{% if 
edx_django_service_enable_experimental_docker_shim %} + +OLD_CONTAINERS=$(docker ps -aq --filter name="^{{ edx_django_service_name }}-worker-${DOCKER_WORKER_QUEUE}-${DOCKER_WORKER_NUM}\$") + +# Delete old container so we can reuse the container name +if [[ -n "${OLD_CONTAINERS}" ]]; then + docker rm ${OLD_CONTAINERS} > /dev/null 2>&1 +fi + +# We exec so that celery is the child of supervisor and can be managed properly +exec docker run \ + --name {{ edx_django_service_name }}-worker-${DOCKER_WORKER_QUEUE}-${DOCKER_WORKER_NUM} \ + --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} \ +{% if COMMON_ENABLE_NEWRELIC_APP %} + --env NEW_RELIC_DISTRIBUTED_TRACING_ENABLED \ + --env NEW_RELIC_APP_NAME \ + --env NEW_RELIC_PROCESS_HOST_DISPLAY_NAME \ + --env NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" \ +{% endif -%} +{% for name,value in edx_django_service_environment.items() -%} +{%- if value %} + --env {{ name }} \ +{% endif %} +{%- endfor %} +{% if COMMON_ENABLE_NEWRELIC_APP %} + {{ edx_django_service_docker_image_name }}:latest-newrelic \ + newrelic-admin run-program \ +{% else %} + {{ edx_django_service_docker_image_name }}:latest \ +{% endif %} + celery \ + "$@" +{% else %} +# We exec so that celery is the child of supervisor and can be managed properly +exec {{ executable }} "$@" +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 new file mode 100644 index 00000000000..17bac3789ee --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edx_django_service_venv_bin = edx_django_service_venv_dir + "/bin" %} + +{% set executable = edx_django_service_venv_bin + '/gunicorn' %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edx_django_service_venv_bin + '/newrelic-admin run-program ' + executable %} + +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ edx_django_service_enable_newrelic_distributed_tracing }}" +export NEW_RELIC_APP_NAME="{{ edx_django_service_newrelic_appname }}" +HOSTNAME=$(hostname) +export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = edx_django_service_venv_bin + '/ddtrace-run ' + executable %} +export DD_TAGS="service:{{ edx_django_service_name }}" +export DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true +# Workaround for +# https://github.com/edx/edx-arch-experiments/issues/591 (heavy +# streams of trace-debug logs from ddtrace.) +# +# ddtrace is behaving as if DD_TRACE_DEBUG=true, even though that +# should be false by default, and we're not setting it anywhere that +# we can find. Overriding it to false doesn't work, and none of the +# other trace-related configs that are documented seem to help, but +# from testing DD_TRACE_LOG_STREAM_HANDLER=false seems to help.
+export DD_TRACE_LOG_STREAM_HANDLER=false +{% endif -%} + +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}" + +source {{ edx_django_service_home }}/{{ edx_django_service_name }}_env + +{% if edx_django_service_enable_experimental_docker_shim %} + +OLD_CONTAINERS=$(docker ps -aq --filter name='^{{ edx_django_service_name }}$') + +# Delete old container so we can reuse the container name +if [[ -n "${OLD_CONTAINERS}" ]]; then + docker rm ${OLD_CONTAINERS} > /dev/null 2>&1 +fi + +exec docker run \ + --name {{ edx_django_service_name }} \ + --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} \ + --publish 127.0.0.1:{{ edx_django_service_gunicorn_port }}:{{ edx_django_service_gunicorn_port }}/tcp \ +{% if COMMON_ENABLE_NEWRELIC_APP %} + --env NEW_RELIC_DISTRIBUTED_TRACING_ENABLED \ + --env NEW_RELIC_APP_NAME \ + --env NEW_RELIC_PROCESS_HOST_DISPLAY_NAME \ + --env NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" \ +{% endif -%} +{% for name,value in edx_django_service_environment.items() -%} +{%- if value %} + --env {{ name }} \ +{% endif %} +{%- endfor %} +{% if COMMON_ENABLE_NEWRELIC_APP %} + {{ edx_django_service_docker_image_name }}:latest-newrelic +{% else %} + {{ edx_django_service_docker_image_name }}:latest +{% endif %} +{% else %} +exec {{ executable }} -c {{ edx_django_service_home }}/{{ edx_django_service_name }}_gunicorn.py {{ edx_django_service_gunicorn_extra }} {{ edx_django_service_wsgi_name }}.wsgi:application +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app_env.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app_env.j2 new file mode 100644 index 00000000000..35b408f1a36 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in edx_django_service_environment.items() -%} +{%- if value -%} +export {{ name }}="{{ value }}" +{% endif %} +{%- endfor %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 new file mode 100644 index 00000000000..defe8fcb67e --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 @@ -0,0 +1,22 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html +{{ ansible_managed }} +""" + +timeout = {{ edx_django_service_gunicorn_timeout }} +bind = "{{ edx_django_service_gunicorn_host }}:{{ edx_django_service_gunicorn_port }}" +pythonpath = "{{ edx_django_service_code_dir }}" +workers = {{ edx_django_service_gunicorn_workers }} +worker_class = "{{ edx_django_service_gunicorn_worker_class }}" + +{% if edx_django_service_gunicorn_limit_request_field_size -%} +limit_request_field_size = {{ edx_django_service_gunicorn_limit_request_field_size }} +{% endif %} + +{% if edx_django_service_gunicorn_max_requests -%} +max_requests = {{ edx_django_service_gunicorn_max_requests }} +{% endif %} + +{{ edx_django_service_gunicorn_extra_conf }} + +{{ common_pre_request }} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 new file mode 100644 index 00000000000..e31b87dc5e1 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }}
+ +source {{ edx_django_service_home }}/{{ edx_django_service_name }}_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ edx_django_service_nodeenv_bin }}/activate + . {{ edx_django_service_venv_bin_dir }}/activate + cd {{ edx_django_service_code_dir }} + + /bin/bash + ;; + exec) + shift + + . {{ edx_django_service_nodeenv_bin }}/activate + . {{ edx_django_service_venv_bin_dir }}/activate + cd {{ edx_django_service_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/app.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/app.j2 new file mode 100644 index 00000000000..d051e6adc62 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/app.j2 @@ -0,0 +1,34 @@ +# +# {{ ansible_managed }} +# + +{% include "concerns/upstream.j2"%} +{% include "concerns/cors-build-map.j2" %} + +server { + server_name {{ edx_django_service_hostname }}; + listen {{ edx_django_service_nginx_port }}; + +{% if NGINX_ENABLE_SSL %} + {% include "concerns/handle-ip-disclosure.j2" %} + rewrite ^ https://$host$request_uri? permanent; +{% else %} + {% if NGINX_REDIRECT_TO_HTTPS %} + {% include "concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2" %} + {% include "concerns/handle-tls-terminated-elsewhere-redirect.j2" %} + {% endif %} + {% include "concerns/app-common.j2" %} +{% endif %} +} + +{% if NGINX_ENABLE_SSL %} +server { + server_name {{ edx_django_service_hostname }}; + listen {{ edx_django_service_ssl_nginx_port }} ssl; + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + {% include "concerns/app-common.j2" %} +} +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 new file mode 100644 index 00000000000..52d79ba7913 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 @@ -0,0 +1,11 @@ +{% if NGINX_ADMIN_ACCESS_CIDRS and EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION %} + location ~ ^/({{ EDX_DJANGO_SERVICE_ADMIN_URLS|join("|") }}) { + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} + allow {{ cidr }}; + {% endfor %} + deny all; + try_files $uri @proxy_to_app; + } +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 new file mode 100644 index 00000000000..d605cca610d --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 @@ -0,0 +1,15 @@ + +{% include "concerns/s3_maintenance.j2" %} +{% include "concerns/static-assets.j2" %} +{% include "concerns/proxy-to-app.j2" %} +{% if edx_django_service_max_webserver_upload %} + client_max_body_size {{ edx_django_service_max_webserver_upload }}M; +{% endif %} +{% if edx_django_service_nginx_read_timeout %} + 
proxy_read_timeout {{ edx_django_service_nginx_read_timeout }}; +{% endif %} +{% if NGINX_ENABLE_REQUEST_TRACKING_ID %} + + # To track requests + add_header X-Request-ID $request_tracking_id; +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 new file mode 100644 index 00000000000..4652960d2be --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 @@ -0,0 +1,17 @@ +{% if edx_django_service_enable_basic_auth|bool %} + satisfy any; + + allow 127.0.0.1; + + {% for cidr in COMMON_BASIC_AUTH_EXCEPTIONS %} + allow {{ cidr }}; + {% endfor %} + + deny all; + + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + + index index.html; + proxy_set_header X-Forwarded-Proto https; +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 new file mode 100644 index 00000000000..8c693f4f9ca --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 @@ -0,0 +1,16 @@ + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' $cors_origin; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'Authorization, USE-JWT-COOKIE'; + {% if edx_django_service_allow_cors_credentials %} + add_header 'Access-Control-Allow-Credentials' true; + {% endif %} + add_header 'Access-Control-Max-Age' 86400; + add_header 'Content-Type' 'text/plain; charset=utf-8'; + add_header 'Content-Length' 0; + return 204; + } + + add_header 'Access-Control-Allow-Origin' $cors_origin always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; + add_header 'Access-Control-Allow-Credentials' true always; diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 new file mode 100644 index 00000000000..06b27cca41e --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 @@ -0,0 +1,15 @@ + + +# The Origin request header indicates where a fetch originates from. It doesn't include any path information, +# but only the server name (e.g. https://www.example.com). +# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details. + +# Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one +# of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS.
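+#
+# Illustrative rendering only (the hostname below is a placeholder, not a value
+# shipped with this role): with
+#   edx_django_service_cors_whitelist: ['studio.example.com']
+# the map below would render roughly as:
+#
+#   map $http_origin $cors_origin {
+#   default "null";
+#       "~*^https?:\/\/studio\.example\.com$" $http_origin;
+#   }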
+ +map $http_origin $cors_origin { +default "null"; +{% for host in edx_django_service_cors_whitelist %} + "~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin; +{% endfor %} +} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 new file mode 100644 index 00000000000..f7267de637d --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 @@ -0,0 +1,12 @@ +# If you are changing this be warned that it lives in multiple places +# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS +# version of this in nginx and in edx_django_service role. + +{% if NGINX_ALLOW_PRIVATE_IP_ACCESS %} +# This regexp matches only public IP addresses. +if ($host ~ "(\d+)(? 0 %} +location /robots.txt { + root {{ nginx_app_dir }}; + try_files $uri /robots.txt =404; +} +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 new file mode 100644 index 00000000000..3fda8538db9 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 @@ -0,0 +1,26 @@ +{% if EDX_DJANGO_SERVICE_ENABLE_S3_MAINTENANCE %} + # Do not include a 502 error in NGINX_ERROR_PAGES when + # EDX_DJANGO_SERVICE_ENABLE_S3_MAINTENANCE is enabled. + + # Return a 503 instead so that it passes through Cloudflare + error_page 502 =503 @maintenance; + + # This section of the file was copied from playbooks/roles/nginx/templates/edx/app/nginx/sites-available/ + # modifications should be made to both files if necessary. + + {% if EDX_DJANGO_SERVICE_ENABLE_S3_MAINTENANCE %} + location @maintenance { + rewrite ^(.*) {{ EDX_DJANGO_SERVICE_S3_MAINTENANCE_FILE }} break; + proxy_http_version 1.1; + proxy_set_header Host s3.amazonaws.com; + proxy_set_header Authorization ''; + proxy_hide_header x-amz-id-2; + proxy_hide_header x-amz-request-id; + proxy_hide_header Set-Cookie; + proxy_ignore_headers "Set-Cookie"; + proxy_buffering off; + proxy_intercept_errors on; + proxy_pass https://s3.amazonaws.com; + } + {% endif %} +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 new file mode 100644 index 00000000000..ca7d5e9d59c --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 @@ -0,0 +1,15 @@ +location ~ ^/static/(?P<file>.*) { + root {{ COMMON_DATA_DIR }}/{{ edx_django_service_name }}; + {% include "concerns/cors-add-header.j2" %} + + # Inform downstream caches to take certain headers into account when reading/writing to cache.
+ add_header 'Vary' 'Accept-Encoding,Origin'; + + try_files /staticfiles/$file =404; +} + +location ~ ^/media/(?P<file>.*) { + root {{ COMMON_DATA_DIR }}/{{ edx_django_service_name }}; + try_files /media/$file =404; +} + diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/upstream.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/upstream.j2 new file mode 100644 index 00000000000..b1da558cb4a --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/upstream.j2 @@ -0,0 +1,5 @@ +upstream {{ edx_django_service_name }}_app_server { +{% for host in nginx_edx_django_service_gunicorn_hosts %} + server {{ host }}:{{ edx_django_service_gunicorn_port }} fail_timeout=0; +{% endfor %} +} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app-workers.conf.j2 b/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app-workers.conf.j2 new file mode 100644 index 00000000000..b7b9e3ca326 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app-workers.conf.j2 @@ -0,0 +1,45 @@ +{% if edx_django_service_enable_experimental_docker_shim %} +{% for w in edx_django_service_workers %} +{% for count in range(1, 1 + w.concurrency) %} +[program:{{ w.queue }}_{{ count }}] + +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ edx_django_service_newrelic_appname }}-workers,NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ edx_django_service_enable_newrelic_distributed_tracing }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY=1,LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},LANG={{ edx_django_service_lang }},PYTHONPATH={{ edx_django_service_code_dir }},BOTO_CONFIG="{{ edx_django_service_home }}/.boto",EDX_REST_API_CLIENT_NAME={{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}-worker-{{ w.queue }},DOCKER_WORKER_QUEUE={{ w.queue }},DOCKER_WORKER_NUM={{ count }} + +user={{ common_web_user }} +directory={{ edx_django_service_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log + +command={{ edx_django_service_home }}/{{ edx_django_service_name }}-workers.sh --app {{ edx_django_service_name }}.celery:app worker -A {{ edx_django_service_name }} --loglevel=info --queue={{ w.queue }} --hostname={{ edx_django_service_name }}.{{ w.queue }}.%%h --concurrency=1 {{ '--max-tasks-per-child ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '--without-heartbeat' if not edx_django_service_celery_heartbeat_enabled|bool else '' }} +killasgroup=true +stopwaitsecs={{ w.stopwaitsecs | default(edx_django_service_default_stopwaitsecs) }} + +{% endfor %} +{% endfor %} + +[group:{{ edx_django_service_name }}_workers] +programs={%- for w in edx_django_service_workers %}{%- for c in range(1, 1 + w.concurrency) %}{{ w.queue }}_{{ c }}{%- if not loop.last %},{%- endif %}{%- endfor %}{%- if not loop.last %},{%- endif %}{%- endfor %} +{% else %} +{% for w in edx_django_service_workers %} +[program:{{ w.queue }}_{{ w.concurrency }}] + +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ edx_django_service_newrelic_appname }}-workers,NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ edx_django_service_enable_newrelic_distributed_tracing }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{%
endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},LANG={{ edx_django_service_lang }},PYTHONPATH={{ edx_django_service_code_dir }},BOTO_CONFIG="{{ edx_django_service_home }}/.boto",EDX_REST_API_CLIENT_NAME={{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}-worker-{{ w.queue }} +user={{ common_web_user }} +directory={{ edx_django_service_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log + +{% set worker_app_name = edx_django_service_name %} +{% if edx_django_service_name == 'discovery' %} +{% set worker_app_name = 'course_discovery' %} +{% endif %} + +command={{ edx_django_service_home }}/{{ edx_django_service_name }}-workers.sh --app {{ worker_app_name }}.celery:app worker --loglevel=info --queues={{ w.queue }} --hostname={{ edx_django_service_name }}.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--max-tasks-per-child ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '--without-heartbeat' if not edx_django_service_celery_heartbeat_enabled|bool else '' }} +killasgroup=true +stopwaitsecs={{ w.stopwaitsecs | default(edx_django_service_default_stopwaitsecs) }} + +{% endfor %} + +[group:{{ edx_django_service_name }}_workers] +programs={%- for w in edx_django_service_workers %}{{ w.queue }}_{{ w.concurrency }}{%- if not loop.last %},{%- endif %}{%- endfor %} +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app.conf.j2 b/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app.conf.j2 new file mode 100644 index 00000000000..5288883d35c --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app.conf.j2 @@ -0,0 +1,19 @@ +# +# {{ ansible_managed }} +# + +{% if edx_django_service_is_devstack %} +[program:nginx] +command=nginx -g 'daemon off;' +killasgroup=true +stopasgroup=true +{% endif %} + +[program:{{ edx_django_service_name }}] +command={{ edx_django_service_home }}/{{ edx_django_service_name }}.sh +user={{ common_web_user }} +directory={{ edx_django_service_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/edx_django_service_with_rendered_config/defaults/main.yml b/playbooks/roles/edx_django_service_with_rendered_config/defaults/main.yml new file mode 100644 index 00000000000..bf311d99f7f --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/defaults/main.yml @@ -0,0 +1,243 @@ +--- +edx_django_service_with_rendered_config_service_name: ' NOT-SET ' +edx_django_service_with_rendered_config_repo: '{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_home: '{{ COMMON_APP_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_user: '{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_use_python3: true + +# This should be overwritten at the time Ansible is run. 
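+# As a hedged illustration (not shipped configuration), the flag can also be
+# set per run with an extra var, e.g.:
+#   ansible-playbook <your-play>.yml -e edx_django_service_with_rendered_config_is_devstack=true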
+edx_django_service_with_rendered_config_is_devstack: false + +edx_django_service_with_rendered_config_has_static_assets: true + +edx_django_service_with_rendered_config_wsgi_name: '{{ edx_django_service_with_rendered_config_service_name }}' + +edx_django_service_with_rendered_config_name_devstack_logs: + - '{{ supervisor_log_dir }}/{{ edx_django_service_with_rendered_config_service_name }}-stdout.log' + - '{{ supervisor_log_dir }}/{{ edx_django_service_with_rendered_config_service_name }}-stderr.log' + +edx_django_service_with_rendered_config_git_protocol: '{{ COMMON_GIT_PROTOCOL }}' +edx_django_service_with_rendered_config_git_domain: '{{ COMMON_GIT_MIRROR }}' +edx_django_service_with_rendered_config_git_path: '{{ COMMON_GIT_PATH }}' +edx_django_service_with_rendered_config_version: 'master' +edx_django_service_with_rendered_config_git_identity: null +edx_django_service_with_rendered_config_django_settings_module: null + +edx_django_service_with_rendered_config_code_dir: '{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_venv_dir: '{{ edx_django_service_with_rendered_config_home }}/venvs/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_venv_bin_dir: '{{ edx_django_service_with_rendered_config_venv_dir }}/bin' + +edx_django_service_with_rendered_config_nodeenv_dir: '{{ edx_django_service_with_rendered_config_home }}/nodeenvs/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_nodeenv_bin: '{{ edx_django_service_with_rendered_config_nodeenv_dir }}/bin' +edx_django_service_with_rendered_config_node_modules_dir: '{{ edx_django_service_with_rendered_config_code_dir }}/node_modules' +edx_django_service_with_rendered_config_node_bin: '{{ edx_django_service_with_rendered_config_node_modules_dir }}/.bin' +edx_django_service_with_rendered_config_node_version: '12.11.1' + +edx_django_service_with_rendered_config_environment_default: + DJANGO_SETTINGS_MODULE: '{{ edx_django_service_with_rendered_config_django_settings_module }}' + PATH: '{{ edx_django_service_with_rendered_config_nodeenv_bin }}:{{ edx_django_service_with_rendered_config_venv_dir }}/bin:{{ ansible_env.PATH }}' +edx_django_service_with_rendered_config_environment_extra: {} +edx_django_service_with_rendered_config_environment: '{{ edx_django_service_with_rendered_config_environment_default | combine(edx_django_service_with_rendered_config_environment_extra) }}' + +edx_django_service_with_rendered_config_migration_environment_default: + DB_MIGRATION_USER: '{{ COMMON_MYSQL_MIGRATE_USER }}' + DB_MIGRATION_PASS: '{{ COMMON_MYSQL_MIGRATE_PASS }}' +edx_django_service_with_rendered_config_migration_environment: '{{ edx_django_service_with_rendered_config_environment|combine(edx_django_service_with_rendered_config_migration_environment_default) }}' + +edx_django_service_with_rendered_config_debian_pkgs_default: + - gettext + - libffi-dev # Needed to install the Python cryptography library for asymmetric JWT signing + - libmemcached-dev + - libmysqlclient-dev + - libssl-dev +edx_django_service_with_rendered_config_debian_pkgs_extra: [] +edx_django_service_with_rendered_config_debian_pkgs: '{{ edx_django_service_with_rendered_config_debian_pkgs_default + edx_django_service_with_rendered_config_debian_pkgs_extra }}' + +edx_django_service_with_rendered_config_gunicorn_extra: '' +edx_django_service_with_rendered_config_gunicorn_extra_conf: '' 
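+# Illustrative overrides only (placeholder values, not defaults of this role);
+# an inheriting role or extra vars might set, for example:
+#   edx_django_service_with_rendered_config_gunicorn_extra: '--log-level debug'
+#   edx_django_service_with_rendered_config_gunicorn_extra_conf: 'keepalive = 5'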
+edx_django_service_with_rendered_config_gunicorn_host: '127.0.0.1' +edx_django_service_with_rendered_config_gunicorn_port: null +edx_django_service_with_rendered_config_gunicorn_timeout: 300 +edx_django_service_with_rendered_config_gunicorn_workers: 2 +edx_django_service_with_rendered_config_gunicorn_worker_class: 'gevent' +edx_django_service_with_rendered_config_gunicorn_max_requests: null + +edx_django_service_with_rendered_config_cors_whitelist: [] +edx_django_service_with_rendered_config_allow_cors_headers: false +edx_django_service_with_rendered_config_max_webserver_upload: !!null +edx_django_service_with_rendered_config_allow_cors_credentials: false + +nginx_edx_django_service_with_rendered_config_gunicorn_hosts: + - 127.0.0.1 + +edx_django_service_with_rendered_config_hostname: '~^((stage|prod)-)?{{ edx_django_service_with_rendered_config_service_name }}.*' +edx_django_service_with_rendered_config_nginx_port: '1{{ edx_django_service_with_rendered_config_gunicorn_port }}' +edx_django_service_with_rendered_config_nginx_read_timeout: !!null + +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE: false +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_S3_MAINTENANCE_FILE: "/edx-static/maintenance/maintenance.html" + +edx_django_service_with_rendered_config_ssl_nginx_port: '4{{ edx_django_service_with_rendered_config_gunicorn_port }}' +edx_django_service_with_rendered_config_enable_basic_auth: false +edx_django_service_with_rendered_config_basic_auth_exempted_paths_default: + - api +edx_django_service_with_rendered_config_basic_auth_exempted_paths_extra: [] +edx_django_service_with_rendered_config_basic_auth_exempted_paths: '{{ edx_django_service_with_rendered_config_basic_auth_exempted_paths_default + edx_django_service_with_rendered_config_basic_auth_exempted_paths_extra }}' + +edx_django_service_with_rendered_config_newrelic_appname: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_enable_newrelic_distributed_tracing: false + +edx_django_service_with_rendered_config_repos: + - PROTOCOL: '{{ edx_django_service_with_rendered_config_git_protocol }}' + DOMAIN: '{{ edx_django_service_with_rendered_config_git_domain }}' + PATH: '{{ edx_django_service_with_rendered_config_git_path }}' + REPO: '{{ edx_django_service_with_rendered_config_repo }}.git' + VERSION: '{{ edx_django_service_with_rendered_config_version }}' + DESTINATION: '{{ edx_django_service_with_rendered_config_code_dir }}' + SSH_KEY: '{{ edx_django_service_with_rendered_config_git_identity }}' + +edx_django_service_with_rendered_config_secret_key: null +edx_django_service_with_rendered_config_language_code: 'en-us' + +edx_django_service_with_rendered_config_data_dir: '{{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_media_root: '{{ edx_django_service_with_rendered_config_data_dir }}/media' +edx_django_service_with_rendered_config_media_url: '/media/' + +edx_django_service_with_rendered_config_static_root: '{{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}/staticfiles' +edx_django_service_with_rendered_config_staticfiles_storage: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +edx_django_service_with_rendered_config_media_storage_backend: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ edx_django_service_with_rendered_config_media_root }}' + MEDIA_URL: '{{ 
edx_django_service_with_rendered_config_media_url }}' + +edx_django_service_with_rendered_config_memcache: [ 'memcache' ] + +edx_django_service_with_rendered_config_caches: + default: + BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' + KEY_PREFIX: '{{ edx_django_service_with_rendered_config_service_name }}' + LOCATION: '{{ edx_django_service_with_rendered_config_memcache }}' + +edx_django_service_with_rendered_config_default_db_host: 'localhost' +edx_django_service_with_rendered_config_default_db_name: '{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_default_db_atomic_requests: false +edx_django_service_with_rendered_config_default_db_conn_max_age: 60 +edx_django_service_with_rendered_config_db_user: 'REPLACE-ME' +edx_django_service_with_rendered_config_db_password: 'password' +edx_django_service_with_rendered_config_db_options: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + +edx_django_service_with_rendered_config_databases: + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ edx_django_service_with_rendered_config_default_db_name }}' + USER: '{{ edx_django_service_with_rendered_config_db_user }}' + PASSWORD: '{{ edx_django_service_with_rendered_config_db_password }}' + HOST: '{{ edx_django_service_with_rendered_config_default_db_host }}' + PORT: '3306' + ATOMIC_REQUESTS: '{{ edx_django_service_with_rendered_config_default_db_atomic_requests }}' + CONN_MAX_AGE: '{{ edx_django_service_with_rendered_config_default_db_conn_max_age }}' + OPTIONS: '{{ edx_django_service_with_rendered_config_db_options }}' + +edx_django_service_with_rendered_config_social_auth_edx_oauth2_key: '{{ edx_django_service_with_rendered_config_service_name }}-sso-key' +edx_django_service_with_rendered_config_social_auth_edx_oauth2_secret: '{{ edx_django_service_with_rendered_config_service_name }}-sso-secret' +edx_django_service_with_rendered_config_backend_service_edx_oauth2_key: '{{ edx_django_service_with_rendered_config_service_name }}-backend-service-key' +edx_django_service_with_rendered_config_backend_service_edx_oauth2_secret: '{{ edx_django_service_with_rendered_config_service_name }}-backend-service-secret' +edx_django_service_with_rendered_config_social_auth_redirect_is_https: false + +edx_django_service_with_rendered_config_oauth2_url_root: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_with_rendered_config_oauth2_issuer: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_with_rendered_config_oauth2_logout_url: '{{ COMMON_OAUTH_LOGOUT_URL }}' +edx_django_service_with_rendered_config_oauth2_provider_url: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' + +edx_django_service_with_rendered_config_jwt_audience: '{{ COMMON_JWT_AUDIENCE }}' +edx_django_service_with_rendered_config_jwt_issuer: '{{ COMMON_JWT_ISSUER }}' +edx_django_service_with_rendered_config_jwt_secret_key: '{{ COMMON_JWT_SECRET_KEY }}' + +edx_django_service_with_rendered_config_session_expire_at_browser_close: false + +edx_django_service_with_rendered_config_jwt_auth: + JWT_ISSUERS: + - AUDIENCE: '{{ edx_django_service_with_rendered_config_jwt_audience }}' + ISSUER: '{{ edx_django_service_with_rendered_config_jwt_issuer }}' + SECRET_KEY: '{{ edx_django_service_with_rendered_config_jwt_secret_key }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + 
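+# Illustrative only: the EXTRA_APPS entry below is rendered into the service
+# config, so an inheriting role could append installed Django apps, e.g.
+#   edx_django_service_with_rendered_config_extra_apps: ['waffle']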
+edx_django_service_with_rendered_config_extra_apps: [] + +edx_django_service_with_rendered_config_api_root: !!null + +edx_django_service_with_rendered_config_service_config_default: + LANGUAGE_CODE: '{{ edx_django_service_with_rendered_config_language_code }}' + SECRET_KEY: '{{ edx_django_service_with_rendered_config_secret_key }}' + TIME_ZONE: 'UTC' + + STATIC_ROOT: '{{ edx_django_service_with_rendered_config_static_root }}' + MEDIA_STORAGE_BACKEND: '{{ edx_django_service_with_rendered_config_media_storage_backend }}' + STATICFILES_STORAGE: '{{ edx_django_service_with_rendered_config_staticfiles_storage }}' + + CACHES: '{{ edx_django_service_with_rendered_config_caches }}' + DATABASES: '{{ edx_django_service_with_rendered_config_databases }}' + + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ edx_django_service_with_rendered_config_social_auth_edx_oauth2_key }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ edx_django_service_with_rendered_config_social_auth_edx_oauth2_secret }}' + SOCIAL_AUTH_EDX_OAUTH2_ISSUER: '{{ edx_django_service_with_rendered_config_oauth2_issuer }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ edx_django_service_with_rendered_config_oauth2_url_root }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ edx_django_service_with_rendered_config_oauth2_logout_url }}' + SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ edx_django_service_with_rendered_config_social_auth_redirect_is_https }}' + + BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ edx_django_service_with_rendered_config_backend_service_edx_oauth2_key }}' + BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ edx_django_service_with_rendered_config_backend_service_edx_oauth2_secret }}' + BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: '{{ edx_django_service_with_rendered_config_oauth2_provider_url }}' + + JWT_AUTH: '{{ edx_django_service_with_rendered_config_jwt_auth }}' + + EXTRA_APPS: '{{ edx_django_service_with_rendered_config_extra_apps }}' + + EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: '{{ edx_django_service_with_rendered_config_oauth2_url_root }}/user_info' + + SESSION_EXPIRE_AT_BROWSER_CLOSE: '{{ edx_django_service_with_rendered_config_session_expire_at_browser_close }}' + + API_ROOT: '{{ edx_django_service_with_rendered_config_api_root }}' + +# NOTE: This should be overridden by inheriting service-specific role. +edx_django_service_with_rendered_config_service_config_overrides: {} +edx_django_service_with_rendered_config_service_config: '{{ edx_django_service_with_rendered_config_service_config_default|combine(edx_django_service_with_rendered_config_service_config_overrides) }}' + +edx_django_service_with_rendered_config_automated_users: + automated_user: + sudo_commands: + - command: '{{ edx_django_service_with_rendered_config_venv_dir }}/python {{ edx_django_service_with_rendered_config_code_dir }}/manage.py showmigrations' + sudo_user: '{{ edx_django_service_with_rendered_config_user }}' + authorized_keys: + - 'SSH authorized key' + +edx_django_service_with_rendered_config_sandbox_build: false + +# This array contains commands that should be run after migration. +# +# The commands will be executed from the code directory with the application's virtualenv activated. The migration +# environment (e.g. migration DB username/password) will NOT be used, so commands should not rely on these values being +# set. In other words, don't try to sneak in another run of the migrate management command. 
+# +# Example: +# edx_django_service_with_rendered_config_post_migrate_commands: +# - command: './manage.py conditional_command' +# when: '{{ foo }}' +# - command: './manage.py always_command' +# when: True +# +# In this example, the "conditional_command" will only be run when the variable `foo` is set to `True`. The +# "always_command" will always be run because its conditional is set to `True`. To minimize surprises, the `when` +# key *MUST* be supplied for all commands. +# +edx_django_service_with_rendered_config_post_migrate_commands: [] + +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_ADMIN_URLS_RESTRICTION: false + +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ADMIN_URLS: [] diff --git a/playbooks/roles/edx_django_service_with_rendered_config/meta/main.yml b/playbooks/roles/edx_django_service_with_rendered_config/meta/main.yml new file mode 100644 index 00000000000..b9e62940116 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/meta/main.yml @@ -0,0 +1,15 @@ +--- +dependencies: + - common + - supervisor + - role: automated + AUTOMATED_USERS: "{{ edx_django_service_with_rendered_config_automated_users }}" + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ edx_django_service_with_rendered_config_service_name }}" + edx_service_with_rendered_config_service_config: "{{ edx_django_service_with_rendered_config_service_config }}" + edx_service_with_rendered_config_repos: "{{ edx_django_service_with_rendered_config_repos }}" + edx_service_with_rendered_config_user: "{{ edx_django_service_with_rendered_config_user }}" + edx_service_with_rendered_config_home: "{{ edx_django_service_with_rendered_config_home }}" + edx_service_with_rendered_config_packages: + debian: "{{ edx_django_service_with_rendered_config_debian_pkgs }}" + redhat: [] diff --git a/playbooks/roles/edx_django_service_with_rendered_config/tasks/main.yml b/playbooks/roles/edx_django_service_with_rendered_config/tasks/main.yml new file mode 100644 index 00000000000..2ce3960ed56 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/tasks/main.yml @@ -0,0 +1,283 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_django_service_with_rendered_config +# +# Overview: +# For devstack set edx_django_service_with_rendered_config_is_devstack to true.
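+#
+# A minimal sketch of a play applying this role (all values below are
+# placeholders, not part of this role):
+#   - hosts: all
+#     roles:
+#       - role: edx_django_service_with_rendered_config
+#         edx_django_service_with_rendered_config_service_name: 'exampleservice'
+#         edx_django_service_with_rendered_config_gunicorn_port: 8140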
+# +# Dependencies: +# +# +# Example play: +# +# + +- name: add gunicorn configuration file + template: + src: "edx/app/app/app_gunicorn.py.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_gunicorn.py" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + tags: + - install + - install:configuration + +- name: install python3 + apt: + name: "{{ item }}" + when: edx_django_service_with_rendered_config_use_python3 + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=python3 {{ edx_django_service_with_rendered_config_venv_dir }}" + args: + creates: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + when: edx_django_service_with_rendered_config_use_python3 + tags: + - install + - install:system-requirements + +- name: build virtualenv with python2.7 + command: "virtualenv --python=python2.7 {{ edx_django_service_with_rendered_config_venv_dir }}" + args: + creates: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + when: not edx_django_service_with_rendered_config_use_python3 + tags: + - install + - install:system-requirements + +- name: Pin pip to a specific version. + command: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/pip install pip=={{ COMMON_PIP_VERSION }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + tags: + - install + - install:system-requirements + +# NOTE (CCB): Ideally we should use the pip Ansible command, +# but that doesn't seem to work with the Python 3.x virtualenv. 
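+# For reference, a sketch of the pip-module equivalent (currently unused for
+# the reason above):
+#   - name: install nodeenv
+#     pip:
+#       name: nodeenv
+#       virtualenv: "{{ edx_django_service_with_rendered_config_venv_dir }}"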
+- name: install nodeenv + command: pip install nodeenv + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + tags: + - install + - install:system-requirements + +- name: create nodeenv + command: "nodeenv {{ edx_django_service_with_rendered_config_nodeenv_dir }} --node={{ edx_django_service_with_rendered_config_node_version }} --prebuilt" + args: + creates: "{{ edx_django_service_with_rendered_config_nodeenv_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + tags: + - install + - install:system-requirements + +- name: install production requirements + command: make production-requirements + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + tags: + - install + - install:app-requirements + +- name: install development requirements + command: make requirements + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + when: edx_django_service_with_rendered_config_is_devstack is defined and edx_django_service_with_rendered_config_is_devstack + tags: + - install + - install:app-requirements + - devstack + - devstack:install + +- name: migrate database + command: make migrate + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_migration_environment }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: run post-migrate commands + command: "{{ item.command }}" + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + with_items: '{{ edx_django_service_with_rendered_config_post_migrate_commands }}' + when: migrate_db is defined and migrate_db|lower == "yes" and item.when|bool + run_once: yes + tags: + - migrate + - migrate:db + - migrate:post + +- name: ensure log files exist for tailing + file: + path: "{{ item }}" + state: touch + owner: "{{ common_web_user }}" + group: "{{ common_web_user }}" + with_items: '{{ edx_django_service_with_rendered_config_name_devstack_logs }}' + tags: + - install + - install:configuration + +- name: write out the supervisor wrapper + template: + src: "edx/app/app/app.sh.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}.sh" + mode: 0650 + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + +- name: write supervisord config + template: + src: "edx/app/supervisor/conf.d.available/app.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: write devstack script + template: + src: "edx/app/app/devstack.sh.j2" + dest: "{{ edx_django_service_with_rendered_config_home
}}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: edx_django_service_with_rendered_config_is_devstack is defined and edx_django_service_with_rendered_config_is_devstack + tags: + - devstack + - devstack:install + +- name: setup the app env file + template: + src: "edx/app/app/app_env.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_env" + owner: "{{ edx_django_service_with_rendered_config_user }}" + group: "{{ edx_django_service_with_rendered_config_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: enable supervisor script + file: + src: "{{ supervisor_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ edx_django_service_with_rendered_config_service_name }}.conf" + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: update supervisor configuration + command: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: create symlinks from the repo dir + file: + src: "{{ edx_django_service_with_rendered_config_code_dir }}/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ edx_django_service_with_rendered_config_service_name }}" + state: link + with_items: + - manage.py + tags: + - install + - install:app-requirements + +- name: compile static assets + command: make static + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + when: edx_django_service_with_rendered_config_has_static_assets + tags: + - assets + - assets:gather + +- name: restart the application + supervisorctl: + state: restarted + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + name: "{{ edx_django_service_with_rendered_config_service_name }}" + when: not disable_edx_services + become_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:start + +- name: Copying nginx configs for the service + template: + src: "edx/app/nginx/sites-available/app.j2" + dest: "{{ nginx_sites_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}" + owner: root + group: "{{ common_web_user }}" + mode: 0640 + when: nginx_app_dir is defined + notify: reload nginx + tags: + - install + - install:vhosts + +- name: Creating nginx config links for the service + file: + src: "{{ nginx_sites_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ edx_django_service_with_rendered_config_service_name }}" + state: link + owner: root + group: root + when: nginx_app_dir is defined + notify: reload nginx + tags: + - install + - install:vhosts + +- name: Include JWT signature setting in the app config file + include_role: + name: jwt_signature + when: edx_django_service_with_rendered_config_sandbox_build + vars: + app_name: '{{ edx_django_service_with_rendered_config_service_name }}' + app_config_file: "{{ COMMON_CFG_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}.yml" + app_config_owner: root + app_config_group: root + app_config_mode: 0644 diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app.sh.j2 
b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app.sh.j2 new file mode 100644 index 00000000000..0a7b94b2e64 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app.sh.j2 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edx_django_service_with_rendered_config_venv_bin = edx_django_service_with_rendered_config_venv_dir + "/bin" %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edx_django_service_with_rendered_config_venv_bin + '/newrelic-admin run-program ' + edx_django_service_with_rendered_config_venv_bin + '/gunicorn' %} +{% else %} +{% set executable = edx_django_service_with_rendered_config_venv_bin + '/gunicorn' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ edx_django_service_with_rendered_config_enable_newrelic_distributed_tracing }}" +export NEW_RELIC_APP_NAME="{{ edx_django_service_with_rendered_config_newrelic_appname }}" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_with_rendered_config_service_name }}" + +source {{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_env +exec {{ executable }} -c {{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_gunicorn.py {{ edx_django_service_with_rendered_config_gunicorn_extra }} {{ edx_django_service_with_rendered_config_wsgi_name }}.wsgi:application diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_env.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_env.j2 new file mode 100644 index 00000000000..b6c48b27077 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in edx_django_service_with_rendered_config_environment.items() -%} +{%- if value -%} +export {{ name }}="{{ value }}" +{% endif %} +{%- endfor %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_gunicorn.py.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_gunicorn.py.j2 new file mode 100644 index 00000000000..598409cb561 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_gunicorn.py.j2 @@ -0,0 +1,18 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html +{{ ansible_managed }} +""" + +timeout = {{ edx_django_service_with_rendered_config_gunicorn_timeout }} +bind = "{{ edx_django_service_with_rendered_config_gunicorn_host }}:{{ edx_django_service_with_rendered_config_gunicorn_port }}" +pythonpath = "{{ edx_django_service_with_rendered_config_code_dir }}" +workers = {{ edx_django_service_with_rendered_config_gunicorn_workers }} +worker_class = "{{ edx_django_service_with_rendered_config_gunicorn_worker_class }}" + +{% if edx_django_service_with_rendered_config_gunicorn_max_requests -%} +max_requests = {{ edx_django_service_with_rendered_config_gunicorn_max_requests }} +{% endif %} + +{{
edx_django_service_with_rendered_config_gunicorn_extra_conf }} + +{{ common_pre_request }} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/devstack.sh.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/devstack.sh.j2 new file mode 100644 index 00000000000..f41d4a7a874 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/devstack.sh.j2 @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ edx_django_service_with_rendered_config_nodeenv_bin }}/activate + . {{ edx_django_service_with_rendered_config_venv_bin_dir }}/activate + cd {{ edx_django_service_with_rendered_config_code_dir }} + + /bin/bash + ;; + exec) + shift + + . {{ edx_django_service_with_rendered_config_nodeenv_bin }}/activate + . {{ edx_django_service_with_rendered_config_venv_bin_dir }}/activate + cd {{ edx_django_service_with_rendered_config_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/app.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/app.j2 new file mode 100644 index 00000000000..9e8c72db16c --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/app.j2 @@ -0,0 +1,34 @@ +# +# {{ ansible_managed }} +# + +{% include "concerns/upstream.j2"%} +{% include "concerns/cors-build-map.j2" %} + +server { + server_name {{ edx_django_service_with_rendered_config_hostname }}; + listen {{ edx_django_service_with_rendered_config_nginx_port }}; + +{% if NGINX_ENABLE_SSL %} + {% include "concerns/handle-ip-disclosure.j2" %} + rewrite ^ https://$host$request_uri? 
permanent; +{% else %} + {% if NGINX_REDIRECT_TO_HTTPS %} + {% include "concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2" %} + {% include "concerns/handle-tls-terminated-elsewhere-redirect.j2" %} + {% endif %} + {% include "concerns/app-common.j2" %} +{% endif %} +} + +{% if NGINX_ENABLE_SSL %} +server { + server_name {{ edx_django_service_with_rendered_config_hostname }}; + listen {{ edx_django_service_with_rendered_config_ssl_nginx_port }} ssl; + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + {% include "concerns/app-common.j2" %} +} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 new file mode 100644 index 00000000000..a631375b7bf --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 @@ -0,0 +1,11 @@ +{% if NGINX_ADMIN_ACCESS_CIDRS and EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_ADMIN_URLS_RESTRICTION %} + location ~ ^/({{ EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ADMIN_URLS|join("|") }}) { + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} + allow {{ cidr }}; + {% endfor %} + deny all; + try_files $uri @proxy_to_app; + } +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/app-common.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/app-common.j2 new file mode 100644 index 00000000000..75a2497dccf --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/app-common.j2 @@ -0,0 +1,10 @@ + +{% include "concerns/s3_maintenance.j2" %} +{% include "concerns/static-assets.j2" %} +{% include "concerns/proxy-to-app.j2" %} +{% if edx_django_service_with_rendered_config_max_webserver_upload %} + client_max_body_size {{ edx_django_service_with_rendered_config_max_webserver_upload }}M; +{% endif %} +{% if edx_django_service_with_rendered_config_nginx_read_timeout %} + proxy_read_timeout {{ edx_django_service_with_rendered_config_nginx_read_timeout }}; +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 new file mode 100644 index 00000000000..82c8cf21a7d --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 @@ -0,0 +1,17 @@ +{% if edx_django_service_with_rendered_config_enable_basic_auth|bool %} + satisfy any; + + allow 127.0.0.1; + + {% for cidr in COMMON_BASIC_AUTH_EXCEPTIONS %} + allow {{ cidr }}; + {% endfor %} + + deny all; + + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + + index index.html; + proxy_set_header X-Forwarded-Proto https; +{% endif %} diff --git
a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 new file mode 100644 index 00000000000..fa96d4d179f --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 @@ -0,0 +1,16 @@ + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' $cors_origin; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'Authorization, USE-JWT-COOKIE'; + {% if edx_django_service_with_rendered_config_allow_cors_credentials %} + add_header 'Access-Control-Allow-Credentials' true; + {% endif %} + add_header 'Access-Control-Max-Age' 86400; + add_header 'Content-Type' 'text/plain; charset=utf-8'; + add_header 'Content-Length' 0; + return 204; + } + + add_header 'Access-Control-Allow-Origin' $cors_origin always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; + add_header 'Access-Control-Allow-Credentials' true always; diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 new file mode 100644 index 00000000000..a8df0c26dfd --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 @@ -0,0 +1,15 @@ + + +# The Origin request header indicates where a fetch originates from. It doesn't include any path information, +# but only the server name (e.g. https://www.example.com). +# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details. + +# Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one +# of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS. + +map $http_origin $cors_origin { +default "null"; +{% for host in edx_django_service_with_rendered_config_cors_whitelist %} + "~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin; +{% endfor %} +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 new file mode 100644 index 00000000000..e006fbab8ca --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 @@ -0,0 +1,12 @@ +# If you are changing this be warned that it lives in multiple places +# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS +# version of this in nginx and in edx_django_service_with_rendered_config role. + +{% if NGINX_ALLOW_PRIVATE_IP_ACCESS %} +# This regexp matches only public IP addresses. +if ($host ~ "(\d+)(? 
0 %} +location /robots.txt { + root {{ nginx_app_dir }}; + try_files $uri /robots.txt =404; +} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 new file mode 100644 index 00000000000..82b5dc0c58a --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 @@ -0,0 +1,26 @@ +{% if EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE %} + # Do not include a 502 error in NGINX_ERROR_PAGES when + # EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE is enabled. + + # Return a 503 instead so that it passes through Cloudflare + error_page 502 =503 @maintenance; + + # This section of the file was copied from playbooks/roles/nginx/templates/edx/app/nginx/sites-available/ + # modifications should be made to both files if necessary. + + {% if EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE %} + location @maintenance { + rewrite ^(.*) {{ EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_S3_MAINTENANCE_FILE }} break; + proxy_http_version 1.1; + proxy_set_header Host s3.amazonaws.com; + proxy_set_header Authorization ''; + proxy_hide_header x-amz-id-2; + proxy_hide_header x-amz-request-id; + proxy_hide_header Set-Cookie; + proxy_ignore_headers "Set-Cookie"; + proxy_buffering off; + proxy_intercept_errors on; + proxy_pass https://s3.amazonaws.com; + } + {% endif %} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 new file mode 100644 index 00000000000..8e1ef780460 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 @@ -0,0 +1,15 @@ +location ~ ^/static/(?P<file>.*) { + root {{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}; + {% include "concerns/cors-add-header.j2" %} + + # Inform downstream caches to take certain headers into account when reading/writing to cache.
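The cors-build-map.j2 concern above compiles edx_django_service_with_rendered_config_cors_whitelist into an nginx map from the Origin request header to the value echoed back in Access-Control-Allow-Origin. A rough Python sketch of the same matching rule (the function name and sample hosts are hypothetical, for illustration only):

import re

def cors_origin(http_origin, whitelist):
    # Mirror the nginx map: a whitelisted host reached over http or https
    # gets its Origin echoed back; anything else gets the literal "null",
    # which disallows CORS for that origin.
    for host in whitelist:
        # The template's replace('.', '\.') escapes dots; re.escape does the same.
        if re.match(r"^https?://{}$".format(re.escape(host)), http_origin, re.IGNORECASE):
            return http_origin
    return "null"

print(cors_origin("https://studio.sandbox.example.com/", ["studio.sandbox.example.com"]))  # echoed back
print(cors_origin("https://attacker.example/", ["studio.sandbox.example.com"]))            # null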
+ add_header 'Vary' 'Accept-Encoding,Origin'; + + try_files /staticfiles/$file =404; +} + +location ~ ^/media/(?P<file>.*) { + root {{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}; + try_files /media/$file =404; +} + diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/upstream.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/upstream.j2 new file mode 100644 index 00000000000..09b069e12c5 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/upstream.j2 @@ -0,0 +1,5 @@ +upstream {{ edx_django_service_with_rendered_config_service_name }}_app_server { +{% for host in nginx_edx_django_service_with_rendered_config_gunicorn_hosts %} + server {{ host }}:{{ edx_django_service_with_rendered_config_gunicorn_port }} fail_timeout=0; +{% endfor %} +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/supervisor/conf.d.available/app.conf.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/supervisor/conf.d.available/app.conf.j2 new file mode 100644 index 00000000000..57fe8557f99 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/supervisor/conf.d.available/app.conf.j2 @@ -0,0 +1,19 @@ +# +# {{ ansible_managed }} +# + +{% if edx_django_service_with_rendered_config_is_devstack %} +[program:nginx] +command=nginx -g 'daemon off;' +killasgroup=true +stopasgroup=true +{% endif %} + +[program:{{ edx_django_service_with_rendered_config_service_name }}] +command={{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}.sh +user={{ common_web_user }} +directory={{ edx_django_service_with_rendered_config_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/edx_maintenance/defaults/main.yml b/playbooks/roles/edx_maintenance/defaults/main.yml new file mode 100644 index 00000000000..6eec1fbae8b --- /dev/null +++ b/playbooks/roles/edx_maintenance/defaults/main.yml @@ -0,0 +1,21 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role edx_maintenance +# + +# +# vars are namespace with the module name.
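The supervisor templates above name their log files with supervisord's Python %-style expansion, where the trailing s in %(program_name)s is the string-conversion character and is not optional. A quick sketch of how that expansion behaves (the sample program name is illustrative):

# Correct pattern: the "s" closes the %(...)s placeholder.
print("%(program_name)s-stdout.log" % {"program_name": "my_service"})
# -> my_service-stdout.log

# Without the "s", Python parses "-s" as flag plus conversion and swallows
# the following character, silently mangling the file name:
print("%(program_name)-stdout.log" % {"program_name": "my_service"})
# -> my_servicetdout.log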
+# +ENABLE_MAINTENANCE: False +EDX_MAINTENANCE_SUPERVISOR_APPS: + - 'lms' + - 'cms' + - 'edxapp_worker:' diff --git a/playbooks/roles/edx_maintenance/meta/main.yml b/playbooks/roles/edx_maintenance/meta/main.yml new file mode 100644 index 00000000000..59d8bde041c --- /dev/null +++ b/playbooks/roles/edx_maintenance/meta/main.yml @@ -0,0 +1,23 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role edx_maintenance +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - common_vars diff --git a/playbooks/roles/edx_maintenance/tasks/main.yml b/playbooks/roles/edx_maintenance/tasks/main.yml new file mode 100644 index 00000000000..d23f3589e9e --- /dev/null +++ b/playbooks/roles/edx_maintenance/tasks/main.yml @@ -0,0 +1,71 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_maintenance +# + +- name: Find supervisor apps + supervisorctl: + name: "{{ item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: present + become_user: "{{ supervisor_service_user }}" + ignore_errors: yes + register: supervisor_apps + with_items: "{{ EDX_MAINTENANCE_SUPERVISOR_APPS }}" + tags: + - manage + +- name: Enable fake heartbeat + copy: + content: "" + dest: "{{ nginx_server_static_dir }}/maintenance_heartbeat.txt" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + when: ENABLE_MAINTENANCE + tags: + - manage + +- name: Stop edxapp + supervisorctl: + # Use item.item because the registered name strips the : off of edxapp_worker: + name: "{{ item.item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: stopped + become_user: "{{ supervisor_service_user }}" + when: ENABLE_MAINTENANCE and not item is failed + with_items: "{{ supervisor_apps.results }}" + tags: + - manage + +- name: Start edxapp + supervisorctl: + # Use item.item because the registered name strips the : off of edxapp_worker: + name: "{{ item.item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + become_user: "{{ supervisor_service_user }}" + when: not ENABLE_MAINTENANCE and not item is failed + with_items: "{{ supervisor_apps.results }}" + tags: + - manage + +- name: Disable fake heartbeat + file: + dest: "{{ nginx_server_static_dir }}/maintenance_heartbeat.txt" + state: absent + when: not ENABLE_MAINTENANCE + tags: + - manage diff --git a/playbooks/roles/edx_notes_api/defaults/main.yml b/playbooks/roles/edx_notes_api/defaults/main.yml new file mode 100644 index 00000000000..3da053356bd --- /dev/null +++ b/playbooks/roles/edx_notes_api/defaults/main.yml @@ -0,0 +1,147 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +#
Defaults for role edx-notes-api +# + +EDX_NOTES_API_GUNICORN_WORKERS_EXTRA: "" +EDX_NOTES_API_WORKERS_EXTRA_CONF: "" +EDX_NOTES_API_LANG: en-us +EDX_NOTES_API_WORKERS: !!null +EDX_NOTES_API_DATASTORE_NAME: "{{ edx_notes_api_service_name }}" +EDX_NOTES_API_MYSQL_DB_USER: notes001 +EDX_NOTES_API_MYSQL_DB_NAME: "{{ EDX_NOTES_API_DATASTORE_NAME }}" +EDX_NOTES_API_MYSQL_DB_PASS: secret +EDX_NOTES_API_MYSQL_HOST: localhost +EDX_NOTES_API_MYSQL_PORT: "3306" +EDX_NOTES_API_MYSQL_OPTIONS: + connect_timeout: 10 +EDX_NOTES_API_ELASTICSEARCH_URL: "localhost:9200" +EDX_NOTES_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-notes" +EDX_NOTES_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +EDX_NOTES_API_HOSTNAME: '~^((stage|prod)-)?notes.*' + +# Change these values!! +EDX_NOTES_API_SECRET_KEY: "CHANGEME" +EDX_NOTES_API_CLIENT_ID: "CHANGEME" +EDX_NOTES_API_CLIENT_SECRET: "CHANGEME" +EDX_NOTES_API_GIT_SSH_KEY: !!null +EDX_NOTES_API_VERSION: master +EDX_NOTES_API_DJANGO_SETTINGS_MODULE: 'notesserver.settings.yaml_config' + +EDX_NOTES_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + +EDX_NOTES_API_DATABASES: + # rw user + default: + ENGINE: django.db.backends.mysql + NAME: "{{ EDX_NOTES_API_MYSQL_DB_NAME }}" + USER: "{{ EDX_NOTES_API_MYSQL_DB_USER }}" + PASSWORD: "{{ EDX_NOTES_API_MYSQL_DB_PASS }}" + HOST: "{{ EDX_NOTES_API_MYSQL_HOST }}" + PORT: "{{ EDX_NOTES_API_MYSQL_PORT }}" + OPTIONS: "{{ EDX_NOTES_API_MYSQL_OPTIONS }}" +EDX_NOTES_API_ALLOWED_HOSTS: + - localhost +EDX_NOTES_API_DISABLE_TOKEN_CHECK: False + +EDX_NOTES_API_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: edx-notes-api.git + VERSION: "{{ EDX_NOTES_API_VERSION }}" + DESTINATION: "{{ edx_notes_api_code_dir }}" + SSH_KEY: "{{ EDX_NOTES_API_GIT_SSH_KEY }}" + +# Remote config +EDX_NOTES_API_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +EDX_NOTES_API_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +EDX_NOTES_API_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# +# This data structure will be written out to yaml configuration file +# in /edx/etc +# +edx_notes_api_service_config: + ALLOWED_HOSTS: "{{ EDX_NOTES_API_ALLOWED_HOSTS }}" + # replace with your secret key + SECRET_KEY: '{{ EDX_NOTES_API_SECRET_KEY }}' + # replace with your oauth id and secret + CLIENT_ID: "{{ EDX_NOTES_API_CLIENT_ID }}" + CLIENT_SECRET: "{{ EDX_NOTES_API_CLIENT_SECRET }}" + ELASTICSEARCH_DSL: + default: + hosts: "{{ EDX_NOTES_API_ELASTICSEARCH_URL }}" + ELASTICSEARCH_INDEX: "edx_notes" + # Number of rows to return by default in result. + RESULTS_DEFAULT_SIZE: 25 + # Max number of rows to return in result. + RESULTS_MAX_SIZE: 250 + DATABASES: "{{ EDX_NOTES_API_DATABASES }}" + HAYSTACK_CONNECTIONS: + default: + ENGINE: 'notesserver.highlight.ElasticsearchSearchEngine' + URL: "{{ EDX_NOTES_API_ELASTICSEARCH_URL }}" + INDEX_NAME: '{{ EDX_NOTES_API_DATASTORE_NAME }}' + DISABLE_TOKEN_CHECK: "{{ EDX_NOTES_API_DISABLE_TOKEN_CHECK }}" + JWT_AUTH: + JWT_ISSUERS: + - AUDIENCE: '{{ COMMON_JWT_AUDIENCE }}' + ISSUER: '{{ COMMON_JWT_ISSUER }}' + SECRET_KEY: '{{ COMMON_JWT_SECRET_KEY }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + USERNAME_REPLACEMENT_WORKER: "{{ EDX_NOTES_USERNAME_REPLACEMENT_WORKER }}" + +# +# vars are namespace with the module name. 
+# + + +edx_notes_api_environment: + EDXNOTES_CONFIG_ROOT: "{{ COMMON_CFG_DIR }}" + LANG: "{{ EDX_NOTES_API_LANG }}" + DJANGO_SETTINGS_MODULE: "{{ EDX_NOTES_API_DJANGO_SETTINGS_MODULE }}" + SERVICE_VARIANT: "{{ edx_notes_api_service_name }}" + PATH: "{{ edx_notes_api_venv_dir }}/bin:{{ ansible_env.PATH }}" + +edx_notes_api_service_name: edx_notes_api +edx_notes_api_user: "{{ edx_notes_api_service_name }}" +edx_notes_api_home: "{{ COMMON_APP_DIR }}/{{ edx_notes_api_service_name }}" +edx_notes_api_code_dir: "{{ edx_notes_api_home }}/{{ edx_notes_api_service_name }}" +edx_notes_api_conf_dir: "{{ edx_notes_api_home }}" +edx_notes_api_venv_dir: "{{ edx_notes_api_home }}/venvs/{{ edx_notes_api_service_name }}" +edx_notes_api_venv_bin: "{{ edx_notes_api_venv_dir }}/bin" + +edx_notes_api_gunicorn_host: "127.0.0.1" +edx_notes_api_gunicorn_port: 8120 +edx_notes_api_gunicorn_timeout: 300 +edx_notes_api_wsgi: notesserver.wsgi:application +edx_notes_api_nginx_port: 18120 +edx_notes_api_ssl_nginx_port: 48120 +edx_notes_api_manage: "{{ edx_notes_api_code_dir }}/manage.py" +edx_notes_api_requirements_base: "{{ edx_notes_api_code_dir }}/requirements" +# Application python requirements +edx_notes_api_requirements: + - base.txt + +# +# OS packages +# +edx_notes_api_debian_pkgs: + - libmysqlclient-dev + - libssl-dev # needed for mysqlclient python library + - python3-dev + - python3.8-dev + +edx_notes_api_redhat_pkgs: [] diff --git a/playbooks/roles/edx_notes_api/meta/main.yml b/playbooks/roles/edx_notes_api/meta/main.yml new file mode 100644 index 00000000000..5b268a95d51 --- /dev/null +++ b/playbooks/roles/edx_notes_api/meta/main.yml @@ -0,0 +1,35 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role edx-notes-api +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - common + - supervisor + - role: edx_service + edx_service_name: "{{ edx_notes_api_service_name }}" + edx_service_config: "{{ edx_notes_api_service_config }}" + edx_service_repos: "{{ EDX_NOTES_API_REPOS }}" + edx_service_user: "{{ edx_notes_api_user }}" + edx_service_home: "{{ edx_notes_api_home }}" + edx_service_packages: + debian: "{{ edx_notes_api_debian_pkgs }}" + redhat: "{{ edx_notes_api_redhat_pkgs }}" + edx_service_decrypt_config_enabled: "{{ EDX_NOTES_API_DECRYPT_CONFIG_ENABLED }}" + edx_service_copy_config_enabled: "{{ EDX_NOTES_API_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/edx_notes_api/tasks/main.yml b/playbooks/roles/edx_notes_api/tasks/main.yml new file mode 100644 index 00000000000..a412fd81dcc --- /dev/null +++ b/playbooks/roles/edx_notes_api/tasks/main.yml @@ -0,0 +1,180 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Tasks for role edx-notes-api +# +# Overview: +# +# Role for installing the edx-notes-api Django application, https://github.com/openedx/edx-notes-api. 
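The app_env.j2 template earlier, and the edx_notes_api env template further below, render a dict such as edx_notes_api_environment into a shell env file, emitting an export line only for truthy values. A small Python sketch of that loop (the sample entries are illustrative):

def render_env_file(environment):
    # One `export NAME="value"` line per truthy entry; falsy values
    # (None, "", 0) are skipped, matching the template's `{% if value %}` guard.
    return "\n".join(
        'export {}="{}"'.format(name, value)
        for name, value in environment.items()
        if value
    )

print(render_env_file({
    "LANG": "en-us",            # kept
    "OPTIONAL_FLAG": "",        # dropped: empty string is falsy
}))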
+# +# Dependencies: +# +# For a complete picture of dependencies, see: +# +# configuration/playbooks/roles/edx_notes_api/meta/main.yml +# configuration/notes.yml +# +# Example play: +# +# - name: Deploy edX Notes API +# hosts: all +# become: True +# gather_facts: True +# vars: +# ENABLE_DATADOG: False +# ENABLE_SPLUNKFORWARDER: False +# ENABLE_NEWRELIC: True +# roles: +# - role: nginx +# nginx_sites: +# - edx-notes-api +# - aws +# - edx-notes-api +# - role: datadog +# when: COMMON_ENABLE_DATADOG +# - role: splunkforwarder +# when: COMMON_ENABLE_SPLUNKFORWARDER + +- name: Install application requirements + pip: + requirements: "{{ edx_notes_api_requirements_base }}/{{ item }}" + virtualenv: "{{ edx_notes_api_home }}/venvs/{{ edx_notes_api_service_name }}" + state: present + extra_args: "--exists-action w" + virtualenv_python: 'python3.8' + become_user: "{{ edx_notes_api_user }}" + with_items: "{{ edx_notes_api_requirements }}" + tags: + - install + - install:system-requirements + +- name: write devstack script + template: + src: "edx/app/edx_notes_api/devstack.sh.j2" + dest: "{{ edx_notes_api_home }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: devstack is defined and devstack + tags: + - devstack + - devstack:install + +- name: Migrate + shell: > + DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' + DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' + {{ edx_notes_api_home }}/venvs/{{ edx_notes_api_service_name }}/bin/python {{ edx_notes_api_manage }} migrate --noinput --settings="notesserver.settings.yaml_config" + args: + chdir: "{{ edx_notes_api_code_dir }}" + become_user: "{{ edx_notes_api_user }}" + environment: + EDXNOTES_CONFIG_ROOT: "{{ COMMON_CFG_DIR }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: Write out gunicorn.py + template: + src: "edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2" + dest: "{{ edx_notes_api_home }}/{{ edx_notes_api_service_name }}_gunicorn.py" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: "0650" + tags: + - install + - install:configuration + +- name: Write out the supervisor wrapper + template: + src: "edx/app/edx_notes_api/edx_notes_api.sh.j2" + dest: "{{ edx_notes_api_home }}/{{ edx_notes_api_service_name }}.sh" + mode: "0650" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + + +- name: Write supervisord config + template: + src: "edx/app/supervisor/conf.d.available/edx_notes_api.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ edx_notes_api_service_name }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: "0644" + tags: + - install + - install:configuration + +- name: Setup the edx_notes_api env file + template: + src: "edx/app/edx_notes_api/edx_notes_api_env.j2" + dest: "{{ edx_notes_api_home }}/{{ edx_notes_api_service_name}}_env" + owner: "{{ edx_notes_api_user }}" + group: "{{ edx_notes_api_user }}" + mode: "0644" + tags: + - install + - install:configuration + + +- name: Enable supervisor script + file: + src: "{{ supervisor_available_dir }}/{{ edx_notes_api_service_name }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ edx_notes_api_service_name }}.conf" + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: Update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services
+ tags: + - manage + - manage:start + +- name: Restart supervisor + supervisorctl: + name: "{{ edx_notes_api_service_name }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: Create manage.py symlink + file: + src: "{{ edx_notes_api_manage }}" + dest: "{{ COMMON_BIN_DIR }}/manage.{{ edx_notes_api_service_name }}" + state: link + tags: + - install + - install:app-requirements + +- name: Restart edx_notes_api + supervisorctl: + name: "{{ edx_notes_api_service_name }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: not disable_edx_services + become_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:start diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/devstack.sh.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/devstack.sh.j2 new file mode 100644 index 00000000000..80dac778517 --- /dev/null +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/devstack.sh.j2 @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ edx_notes_api_home }}/edx_notes_api_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ edx_notes_api_venv_bin }}/activate + cd {{ edx_notes_api_code_dir }} + + /bin/bash + ;; + exec) + shift + + . {{ edx_notes_api_venv_bin }}/activate + cd {{ edx_notes_api_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 new file mode 100644 index 00000000000..179ba8e8823 --- /dev/null +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edx_notes_api_venv_bin = edx_notes_api_home + '/venvs/' + edx_notes_api_service_name + '/bin' %} +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edx_notes_api_venv_bin + '/newrelic-admin run-program ' + edx_notes_api_venv_bin + '/gunicorn' %} +{% else %} +{% set executable = edx_notes_api_venv_bin + '/gunicorn' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ EDX_NOTES_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" +export NEW_RELIC_APP_NAME="{{ EDX_NOTES_API_NEWRELIC_APPNAME }}" +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +source {{ edx_notes_api_home }}/edx_notes_api_env + +export PID=/var/tmp/edx_notes_api.pid +export PORT={{ edx_notes_api_gunicorn_port }} +export ADDRESS={{ edx_notes_api_gunicorn_host }} + +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} -c {{ edx_notes_api_home }}/edx_notes_api_gunicorn.py {{ EDX_NOTES_API_GUNICORN_WORKERS_EXTRA }} {{ edx_notes_api_wsgi }} diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_env.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_env.j2 new file mode 100644 index 00000000000..ae2aca6cb80 --- /dev/null +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in edx_notes_api_environment.items() -%} +{%- if value -%} +export 
{{ name }}="{{ value }}" +{% endif %} +{%- endfor %} diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 new file mode 100644 index 00000000000..7c9ed639c3f --- /dev/null +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 @@ -0,0 +1,27 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html + +{{ ansible_managed }} +""" +import multiprocessing + +preload_app = True +timeout = {{ edx_notes_api_gunicorn_timeout }} +bind = "{{ edx_notes_api_gunicorn_host }}:{{ edx_notes_api_gunicorn_port }}" +pythonpath = "{{ edx_notes_api_code_dir }}" +limit_request_field_size = 16384 + +{% if EDX_NOTES_API_WORKERS %} +workers = {{ EDX_NOTES_API_WORKERS }} +{% else %} +workers = (multiprocessing.cpu_count()-1) * 2 + 2 +{% endif %} + +{{ common_pre_request }} + +{{ common_close_all_caches }} + +def post_fork(server, worker): + close_all_caches() + +{{ EDX_NOTES_API_WORKERS_EXTRA_CONF }} diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/supervisor/conf.d.available/edx_notes_api.conf.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/supervisor/conf.d.available/edx_notes_api.conf.j2 new file mode 100644 index 00000000000..848c1437f62 --- /dev/null +++ b/playbooks/roles/edx_notes_api/templates/edx/app/supervisor/conf.d.available/edx_notes_api.conf.j2 @@ -0,0 +1,13 @@ +# +# {{ ansible_managed }} +# + +[program:{{ edx_notes_api_service_name }}] + +command={{ edx_notes_api_home }}/{{ edx_notes_api_service_name }}.sh +user={{ common_web_user }} +directory={{ edx_notes_api_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/edx_service/defaults/main.yml b/playbooks/roles/edx_service/defaults/main.yml index ef29a147887..99eef1aaa7a 100644 --- a/playbooks/roles/edx_service/defaults/main.yml +++ b/playbooks/roles/edx_service/defaults/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Defaults for role edx_service @@ -14,12 +14,31 @@ # # vars are namespace with the module name. # -edx_service_role_name: edx_service +edx_service_name: edx_service + +edx_service_repos: [] + +# A few roles meta this role but don't need a config file written +# this allows them to not pass a config and the tasks will skip +# and not write out a config at all. +edx_service_config: {} + +# If you would like edx_service to strip out !!null settings before writing out +# the yaml config, set this to true.
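When EDX_NOTES_API_WORKERS is left null, the edx_notes_api gunicorn template above falls back to a worker count derived from the host's CPU count. The fallback expression, runnable as-is:

import multiprocessing

# Fallback from edx_notes_api_gunicorn.py.j2 when EDX_NOTES_API_WORKERS is unset:
# roughly two workers per core, reserving one core, plus two extra.
workers = (multiprocessing.cpu_count() - 1) * 2 + 2
print(workers)  # e.g. (4 - 1) * 2 + 2 == 8 on a 4-core host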
+edx_service_config_filter_nones: false # # OS packages # +edx_service_packages: + debian: [] + redhat: [] + +edx_service_local_config_file: "{{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml" -edx_service_debian_pkgs: [] +edx_service_decrypt_config_enabled: false +edx_service_copy_config_enabled: false -edx_service_redhat_pkgs: [] +edx_service_use_python38: false +edx_service_use_python3: false +edx_service_venv_dir: "{{ edx_service_home }}/venvs/{{ edx_service_name }}" diff --git a/playbooks/roles/edx_service/handlers/main.yml b/playbooks/roles/edx_service/handlers/main.yml deleted file mode 100644 index 48e31836ac2..00000000000 --- a/playbooks/roles/edx_service/handlers/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role edx_service -# -# Overview: -# -# -- name: edx_service | notify me - debug: msg="stub handler" diff --git a/playbooks/roles/edx_service/meta/main.yml b/playbooks/roles/edx_service/meta/main.yml index 50f68b007c3..bacc9826fe3 100644 --- a/playbooks/roles/edx_service/meta/main.yml +++ b/playbooks/roles/edx_service/meta/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Role includes for role edx_service @@ -18,3 +18,18 @@ # my_role_var0: "foo" # my_role_var1: "bar" # } + +dependencies: + - role: add_user + user_name: "{{ edx_service_name }}" + user_home: "{{ edx_service_home }}" + group_name: "{{ common_web_group }}" + - role: git_clone + repo_owner: "{{ edx_service_user }}" + repo_group: "{{ edx_service_user }}" + GIT_REPOS: "{{ edx_service_repos }}" + git_home: "{{ edx_service_home }}" + when: edx_service_repos is defined + +# Allow this role to be duplicated in dependencies. 
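edx_service_config_filter_nones above feeds the config.yml.j2 template (shown further below), which pops !!null keys out of edx_service_config before dumping it to /edx/etc. An equivalent Python sketch (assumes PyYAML is available; the sample config is illustrative):

import yaml

def render_config(config, filter_nones=False):
    # Same effect as the template's `{% do edx_service_config.pop(key) %}`
    # loop: drop None-valued keys, then dump the rest as nice YAML.
    if filter_nones:
        config = {key: value for key, value in config.items() if value is not None}
    return yaml.safe_dump(config, default_flow_style=False)

print(render_config({"SECRET_KEY": "CHANGEME", "GIT_SSH_KEY": None}, filter_nones=True))
# Only SECRET_KEY survives; GIT_SSH_KEY (null) is stripped.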
+allow_duplicates: yes diff --git a/playbooks/roles/edx_service/tasks/main.yml b/playbooks/roles/edx_service/tasks/main.yml index fbc44b4750f..ca733b30d79 100644 --- a/playbooks/roles/edx_service/tasks/main.yml +++ b/playbooks/roles/edx_service/tasks/main.yml @@ -2,15 +2,15 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # # Tasks for role edx_service -# +# # Overview: # # This role performs the repetive tasks that most edX roles @@ -19,37 +19,237 @@ # Example play: # # Rather than being included in the play, this role -# is included as a dependency by other roles in the meta/mail.yml +# is included as a dependency by other roles in the meta/main.yml # file. The including role should add the following # depency definition. # # dependencies: -# - { role: edx_service, edx_service_name: "hotg" } -# - -- name: create application user - user: > - name="{{ edx_service_name }}" - home="{{ COMMON_APP_DIR }}/{{ edx_service_name }}" - createhome=no - shell=/bin/false - -- name: create edx_service app and venv dir - file: > - path="{{ item }}" - state=directory - owner="{{ edx_service_name }}" - group="{{ common_web_group }}" +# - role: edx_service +# edx_service_name: "hotg" +# edx_service_config: "{{ structure_to_be_written_to_config_file_in_/edx/etc }}" +# edx_service_repos: +# - PROTOCOL: [https/ssh] +# DOMAIN: github.com +# PATH: edx +# REPO: hotg +# VERSION: master +# DESTINATION: "/edx/app/hotg/hotg" +# SSH_KEY: +# - PROTOCOL +# ... 
+# edx_service_user: hotg_system_user +# edx_service_home: "/edx/app/hotg" +# edx_service_packages: +# debian: [ pkg1, pkg2, pkg3 ] +# redhat: [ pkg4, pkg5 ] +# + +- name: Create edx_service app, venv, data, and staticfiles dirs + file: + path: "{{ edx_service_home }}/{{ item }}" + state: directory + owner: "{{ edx_service_name }}" + group: "{{ common_web_group }}" + with_items: + - "" + - "venvs" + - "data" + - "staticfiles" + tags: + - install + - install:base + +- name: Create /edx/var/app dir + file: + path: "/edx/var/{{ edx_service_name }}" + state: directory + owner: "{{ edx_service_user }}" + group: "{{ common_web_group }}" + mode: "0755" + tags: + - install + - install:base + +- name: Create /edx/etc dir + file: + path: "/edx/etc" + state: directory + owner: "{{ edx_service_user }}" + group: "{{ common_web_group }}" + mode: "0755" + tags: + - install + - install:base + +- name: Create edx_service log dir + file: + path: "{{ item }}" + state: directory + owner: "syslog" + group: "syslog" + with_items: + - "{{ COMMON_LOG_DIR }}/{{ edx_service_name }}" + tags: + - install + - install:base + +- name: Write out app config file + template: + src: "config.yml.j2" + dest: "{{ COMMON_CFG_DIR }}/{{ edx_service_name }}.yml" + mode: "0644" + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: not edx_service_decrypt_config_enabled + tags: + - install + - install:configuration + - install:app-configuration + +- name: Install decrypt config private key from variable + local_action: + module: copy + content: "{{ DECRYPT_CONFIG_PRIVATE_KEY_VAR }}" + dest: "{{ DECRYPT_CONFIG_PRIVATE_KEY_PATH | default('/var/tmp') }}/private.key" + force: yes + mode: "0644" + become: false + no_log: True + when: edx_service_decrypt_config_enabled and DECRYPT_CONFIG_PRIVATE_KEY_VAR is defined + tags: + - install + - install:configuration + - install:app-configuration + +- name: Decrypt app config file + local_action: command asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path {{ ENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml --private_key_path {{ DECRYPT_CONFIG_PRIVATE_KEY }} --outfile_path {{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml + become: false + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: edx_service_decrypt_config_enabled + tags: + - install + - install:configuration + - install:app-configuration + +- name: Install Python 3.8 + apt: + pkg: + - python3.8-dev + - python3.8-distutils + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: edx_service_use_python38 + tags: + - install + - install:system-requirements + +- name: install python3 + apt: + name: "{{ item }}" + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: edx_service_use_python3 and not edx_service_use_python38 with_items: - - "{{ COMMON_APP_DIR }}/{{ edx_service_name }}" - - "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs" + - python3-pip + - python3-dev + tags: + - install + - install:system-requirements + +- name: Build virtualenv with Python 3.8 + command: "virtualenv --python=python3.8 {{ edx_service_venv_dir }}" + args: + creates: "{{ edx_service_venv_dir }}/bin/pip" + become_user: "{{ edx_service_user }}" + when: edx_service_use_python38 + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=python3 {{ edx_service_venv_dir }}" + args: + creates: "{{ edx_service_venv_dir }}/bin/pip" + become_user: "{{ edx_service_user }}" + when: 
edx_service_use_python3 and not edx_service_use_python38 + tags: + - install + - install:system-requirements -- name: install a bunch of system packages on which edx_service relies - apt: pkg={{ item }} state=present - with_items: "{{ edx_service_name }}_debian_pkgs" +- name: Replace deploy host with sandbox dns name + replace: + path: "{{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml" + regexp: 'deploy_host' + replace: "{{ COMMON_DEPLOY_HOSTNAME }}" + when: edx_service_decrypt_config_enabled and SANDBOX_CONFIG + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + become: false + delegate_to: localhost + tags: + - install + - install:configuration + - install:app-configuration + +- name: Copy app config file + copy: + src: "{{ edx_service_local_config_file }}" + dest: "{{ COMMON_CFG_DIR }}/{{ edx_service_name }}.yml" + mode: 0644 + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: edx_service_copy_config_enabled + tags: + - install + - install:configuration + - install:app-configuration + +- name: Install a bunch of system packages on which edx_service relies + apt: + name: "{{ edx_service_packages.debian }}" + state: present + update_cache: true + cache_valid_time: 3600 + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 when: ansible_distribution in common_debian_variants + tags: + - install + - install:system-requirements + +- name: Install a bunch of system packages on which edx_service relies + yum: + name: "{{ edx_service_packages.redhat }}" + state: present + when: ansible_distribution in common_redhat_variants + tags: + - install + - install:system-requirements + +- name: Get instance information + action: ec2_metadata_facts + when: AWS_GATHER_FACTS | default(false) + tags: + - to-remove + +- name: Tag instance + ec2_tag_local: + args: + resource: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + tags: + - Name: "version:{{ edx_service_name }}" + Value: "{{ item.0.DOMAIN }}/{{ item.0.PATH }}/{{ item.0.REPO }} {{ item.1.after |truncate(7,True,'') }}" + when: item.1.after is defined and COMMON_TAG_EC2_INSTANCE and edx_service_repos is defined + with_together: + - "{{ edx_service_repos }}" + - "{{ code_checkout.results }}" + tags: + - to-remove -- name: install a bunch of system packages on which edx_service relies - yum: pkg={{ item }} state=present - with_items: "{{ edx_service_name }}_redhat_pkgs" - when: ansible_distribution in common_redhat_variants \ No newline at end of file +#TODO: restart supervisor- depends on supervisor being refactored into this role diff --git a/playbooks/roles/edx_service/templates/config.yml.j2 b/playbooks/roles/edx_service/templates/config.yml.j2 new file mode 100644 index 00000000000..93d89041fd5 --- /dev/null +++ b/playbooks/roles/edx_service/templates/config.yml.j2 @@ -0,0 +1,12 @@ +--- +# {{ ansible_managed }} + +{% if edx_service_config_filter_nones -%} + {% for key, value in edx_service_config.copy().items() -%} + {% if value is none -%} + {% do edx_service_config.pop(key) %} + {%- endif %} + {%- endfor %} +{%- endif %} + +{{ edx_service_config | to_nice_yaml }} diff --git a/playbooks/roles/edx_service_with_rendered_config/defaults/main.yml b/playbooks/roles/edx_service_with_rendered_config/defaults/main.yml new file mode 100644 index 00000000000..a06f7d3f4d3 --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/defaults/main.yml @@ -0,0 +1,37 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki:
https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role edx_service_with_rendered_config +# + +# +# vars are namespace with the module name. +# +edx_service_with_rendered_config_service_name: edx_service_with_rendered_config + +edx_service_with_rendered_config_repos: [] + +# A few roles meta this role but don't need a config file written +# this allows them to not pass a config and the tasks will skip +# and not write out a config at all. +edx_service_with_rendered_config_service_config: {} + +# If you would like edx_service_with_rendered_config to strip out !!null settings before writing out +# the yaml config, set this to true. +edx_service_with_rendered_config_filter_nones: false + +# +# OS packages +# +edx_service_with_rendered_config_packages: + debian: [] + redhat: [] + +edx_service_with_rendered_config_local_config_file: "{{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_with_rendered_config_service_name }}.yml" diff --git a/playbooks/roles/edx_service_with_rendered_config/meta/main.yml b/playbooks/roles/edx_service_with_rendered_config/meta/main.yml new file mode 100644 index 00000000000..6ba0b115b12 --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/meta/main.yml @@ -0,0 +1,29 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role edx_service_with_rendered_config +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - role: edx_service + edx_service_name: "{{ edx_service_with_rendered_config_service_name }}" + edx_service_config: "{{ edx_service_with_rendered_config_service_config }}" + edx_service_user: "{{ edx_service_with_rendered_config_user }}" + edx_service_home: "{{ edx_service_with_rendered_config_home }}" + edx_service_packages: "{{ edx_service_with_rendered_config_packages }}" + edx_service_repos: "{{ edx_service_with_rendered_config_repos }}" diff --git a/playbooks/roles/edx_service_with_rendered_config/tasks/main.yml b/playbooks/roles/edx_service_with_rendered_config/tasks/main.yml new file mode 100644 index 00000000000..92bf7342075 --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/tasks/main.yml @@ -0,0 +1,56 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_service_with_rendered_config +# +# Overview: +# +# This role performs the repetitive tasks that most edX roles +# require in our default configuration. +# +# Example play: +# +# Rather than being included in the play, this role +# is included as a dependency by other roles in the meta/main.yml +# file. The including role should add the following +# dependency definition.
+# +# dependencies: +# - role: edx_service_with_rendered_config +# edx_service_with_rendered_config_service_name: "hotg" +# edx_service_with_rendered_config_service_config: "{{ structure_to_be_written_to_config_file_in_/edx/etc }}" +# edx_service_with_rendered_config_repos: +# - PROTOCOL: [https/ssh] +# DOMAIN: github.com +# PATH: edx +# REPO: hotg +# VERSION: master +# DESTINATION: "/edx/app/hotg/hotg" +# SSH_KEY: +# - PROTOCOL +# ... +# edx_service_with_rendered_config_user: hotg_system_user +# edx_service_with_rendered_config_home: "/edx/app/hotg" +# edx_service_with_rendered_config_packages: +# debian: [ pkg1, pkg2, pkg3 ] +# redhat: [ pkg4, pkg5 ] +# + +- name: Write out app config file + template: + src: "config.yml.j2" + dest: "{{ COMMON_CFG_DIR }}/{{ edx_service_with_rendered_config_service_name }}.yml" + mode: "0644" + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration diff --git a/playbooks/roles/edx_service_with_rendered_config/templates/config.yml.j2 b/playbooks/roles/edx_service_with_rendered_config/templates/config.yml.j2 new file mode 100644 index 00000000000..a3e80ff6f5d --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/templates/config.yml.j2 @@ -0,0 +1,12 @@ +--- +# {{ ansible_managed }} + +{% if edx_service_with_rendered_config_filter_nones -%} + {% for key, value in edx_service_with_rendered_config_service_config.copy().items() -%} + {% if value is none -%} + {% do edx_service_with_rendered_config_service_config.pop(key) %} + {%- endif %} + {%- endfor %} +{%- endif %} + +{{ edx_service_with_rendered_config_service_config | to_nice_yaml }} diff --git a/playbooks/roles/edx_themes/defaults/main.yml b/playbooks/roles/edx_themes/defaults/main.yml new file mode 100644 index 00000000000..d7849a0e8dd --- /dev/null +++ b/playbooks/roles/edx_themes/defaults/main.yml @@ -0,0 +1,36 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role themes +# + +themes_service_name: "edx-themes" +themes_user: "{{ themes_service_name }}" +themes_group: "www-data" +themes_home: "{{ COMMON_DATA_DIR }}/{{ themes_service_name }}" + +THEMES_CODE_DIR: "{{ themes_home }}/{{ themes_service_name }}" + +THEMES_GIT_IDENTITY: !!null +THEMES_GIT_PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" +THEMES_GIT_MIRROR: "{{ COMMON_GIT_MIRROR }}" +THEMES_GIT_PATH: "{{ COMMON_GIT_PATH }}" +THEMES_REPO: "sample-themes.git" +THEMES_VERSION: "master" + + +THEMES_REPOS: + - PROTOCOL: "{{ THEMES_GIT_PROTOCOL }}" + DOMAIN: "{{ THEMES_GIT_MIRROR }}" + PATH: "{{ THEMES_GIT_PATH }}" + REPO: "{{ THEMES_REPO }}" + VERSION: "{{ THEMES_VERSION }}" + DESTINATION: "{{ THEMES_CODE_DIR }}" + SSH_KEY: "{{ THEMES_GIT_IDENTITY }}" diff --git a/playbooks/roles/edx_themes/meta/main.yml b/playbooks/roles/edx_themes/meta/main.yml new file mode 100644 index 00000000000..a5486bb33f6 --- /dev/null +++ b/playbooks/roles/edx_themes/meta/main.yml @@ -0,0 +1,22 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +#
Role includes for role edx_themes +# +dependencies: + - role: add_user + user_name: "{{ themes_user }}" + user_home: "{{ themes_home }}" + group_name: "{{ themes_group }}" + - role: git_clone + repo_owner: "{{ themes_user }}" + repo_group: "{{ themes_group }}" + GIT_REPOS: "{{ THEMES_REPOS }}" + git_home: "{{ themes_home }}" diff --git a/playbooks/roles/edx_themes/tasks/main.yml b/playbooks/roles/edx_themes/tasks/main.yml new file mode 100644 index 00000000000..fa2f35b681e --- /dev/null +++ b/playbooks/roles/edx_themes/tasks/main.yml @@ -0,0 +1,60 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role themes +# +# Overview: +# +# This role performs the repetitive tasks that most edX roles +# require in our default configuration. +# +# Example play: +# +# Rather than being included in the play, this role +# is included as a dependency by other roles in the meta/main.yml +# file. The including role should add the following +# dependency definition. +# +# dependencies: +# - role: edx_themes +# theme_users: +# - ecommerce +# - edxapp +# when: do_setup_themes +# + +# make sure edx-theme's group has read/write access to themes directory +- name: ensure edx-theme's group has read/write access to themes directory + file: + path: "{{ themes_home }}" + state: directory + recurse: yes + owner: "{{ themes_user }}" + group: "{{ themes_group }}" + mode: "g+rw" + tags: + - install + - install:base + +- name: Add theme users to theme's group so that they have read/write access to themes directories + user: + name: "{{ item }}" + shell: /bin/bash + groups: "{{ themes_group }}" + append: yes + with_items: "{{ theme_users }}" + when: theme_users is defined + +- name: update .bashrc to set umask value + lineinfile: + dest: "{{ themes_home }}/.bashrc" + line: "umask 002" + state: present diff --git a/playbooks/roles/edxapp/defaults/main.yml b/playbooks/roles/edxapp/defaults/main.yml index 055e9560c3f..3cc1a16a9b6 100644 --- a/playbooks/roles/edxapp/defaults/main.yml +++ b/playbooks/roles/edxapp/defaults/main.yml @@ -1,4 +1,4 @@ -# variables common to the lms role, automatically loaded +# variables common to the edxapp role, automatically loaded # when the role is included --- @@ -11,132 +11,1100 @@ # # Defaults specified here should not contain # any secrets or host identifying information.
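The edx_themes tasks above append umask 002 to the themes user's .bashrc so files created in the shared checkout stay group-writable. The permission arithmetic, as a runnable sketch:

# `umask 002` clears only the other-write bit. Standard create requests of
# 0o666 (files) and 0o777 (directories) therefore stay group-writable,
# which is what keeps every account in the shared themes group able to
# edit the checked-out themes.
umask = 0o002
print(oct(0o666 & ~umask))  # 0o664 -> rw-rw-r--
print(oct(0o777 & ~umask))  # 0o775 -> rwxrwxr-x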
+# + +EDXAPP_PYTHON_VERSION: "python3.8" + +# Bucket used for xblock file storage +EDXAPP_XBLOCK_FS_STORAGE_BUCKET: !!null +EDXAPP_XBLOCK_FS_STORAGE_PREFIX: !!null +EDXAPP_DJFS: + type: 'osfs' + directory_root: '{{ edxapp_data_dir }}/django-pyfs/static/django-pyfs' + url_root : '/static/django-pyfs' + +EDXAPP_LMS_BASE: "{{ EDXAPP_LMS_SITE_NAME }}:{{ EDXAPP_LMS_NGINX_PORT }}" +EDXAPP_PREVIEW_LMS_BASE: "preview.{{ EDXAPP_LMS_SITE_NAME }}:{{ EDXAPP_LMS_NGINX_PORT }}" +EDXAPP_CMS_BASE: "{{ EDXAPP_CMS_SITE_NAME }}:{{ EDXAPP_CMS_NGINX_PORT }}" + +EDXAPP_LMS_GUNICORN_EXTRA: "" +EDXAPP_LMS_GUNICORN_EXTRA_CONF: "" +EDXAPP_LMS_GUNICORN_TIMEOUT: 300 -EDXAPP_LMS_BASE: '' -EDXAPP_PREVIEW_LMS_BASE: '' -EDXAPP_CMS_BASE: '' -EDXAPP_AWS_ACCESS_KEY_ID: '' -EDXAPP_AWS_SECRET_ACCESS_KEY: '' -EDXAPP_XQUEUE_BASIC_AUTH: [ 'edx', 'edx' ] +EDXAPP_CMS_GUNICORN_EXTRA: "" +EDXAPP_CMS_GUNICORN_EXTRA_CONF: "" +EDXAPP_CMS_GUNICORN_TIMEOUT: 300 + +# Set this to the maximum number +# of requests for gunicorn for the lms and cms +# gunicorn --max-requests +EDXAPP_LMS_MAX_REQ: !!null +EDXAPP_CMS_MAX_REQ: !!null +# 'None' will be written out as null in +# the configuration on disk +EDXAPP_AWS_ACCESS_KEY_ID: !!null +EDXAPP_AWS_SECRET_ACCESS_KEY: !!null +EDXAPP_AWS_QUERYSTRING_AUTH: false +EDXAPP_AWS_STORAGE_BUCKET_NAME: "SET-ME-PLEASE (ex. bucket-name)" +# An empty string makes the course import/export functionality use the +# file system for storage. Setting this to a bucket-name will use AWS +EDXAPP_IMPORT_EXPORT_BUCKET: "" +EDXAPP_AWS_S3_CUSTOM_DOMAIN: "SET-ME-PLEASE (ex. bucket-name.s3.amazonaws.com)" +EDXAPP_SWIFT_USERNAME: !!null +EDXAPP_SWIFT_KEY: !!null +EDXAPP_SWIFT_TENANT_ID: !!null +EDXAPP_SWIFT_TENANT_NAME: !!null +EDXAPP_SWIFT_AUTH_URL: !!null +EDXAPP_SWIFT_AUTH_VERSION: !!null +EDXAPP_SWIFT_REGION_NAME: !!null +EDXAPP_SWIFT_USE_TEMP_URLS: false +EDXAPP_SWIFT_TEMP_URL_KEY: !!null +EDXAPP_SWIFT_TEMP_URL_DURATION: 1800 # seconds +EDXAPP_DEFAULT_FILE_STORAGE: "django.core.files.storage.FileSystemStorage" +EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ] EDXAPP_XQUEUE_DJANGO_AUTH: username: 'lms' - password: 'password' + password: "{{ COMMON_XQUEUE_LMS_PASSWORD }}" EDXAPP_XQUEUE_URL: 'http://localhost:18040/' -EDXAPP_MONGO_HOSTS: ['localhost'] +# Comma-separated list of hosts/ips +EDXAPP_MONGO_HOSTS: 'localhost' EDXAPP_MONGO_PASSWORD: 'password' EDXAPP_MONGO_PORT: 27017 EDXAPP_MONGO_USER: 'edxapp' EDXAPP_MONGO_DB_NAME: 'edxapp' +EDXAPP_MONGO_USE_SSL: False +EDXAPP_MONGO_REPLICA_SET: null +EDXAPP_MONGO_AUTH_DB: '' +# Used only if EDXAPP_MONGO_REPLICA_SET is provided. +EDXAPP_MONGO_CMS_READ_PREFERENCE: 'PRIMARY' +EDXAPP_MONGO_LMS_READ_PREFERENCE: 'SECONDARY_PREFERRED' +# We use the CMS read_preference here because the draft docstore's view of +# the modulestore should mirror Studio's, so that instructors can check their +# changes in Preview mode. +EDXAPP_LMS_DRAFT_DOC_STORE_READ_PREFERENCE: '{{ EDXAPP_MONGO_CMS_READ_PREFERENCE }}' +EDXAPP_LMS_SPLIT_DOC_STORE_READ_PREFERENCE: '{{ EDXAPP_MONGO_LMS_READ_PREFERENCE }}' EDXAPP_MYSQL_DB_NAME: 'edxapp' EDXAPP_MYSQL_USER: 'edxapp001' +EDXAPP_MYSQL_USER_ADMIN: 'root' EDXAPP_MYSQL_PASSWORD: 'password' +EDXAPP_MYSQL_PASSWORD_READ_ONLY: 'password' +EDXAPP_MYSQL_PASSWORD_ADMIN: 'password' +# From Django 2.0 the default isolation level used for the MySQL database backend is 'read committed' +# (refer to https://github.com/django/django/pull/7978).
+# through the Django database configuration options to prevent possible inconsistencies or malfunctions.
+# Changing the isolation level can lead to unexpected behaviors, so please proceed only if you
+# know what you're doing. Refer to https://docs.djangoproject.com/en/2.2/ref/databases/#mysql-isolation-level
+# to get further information.
+EDXAPP_MYSQL_OPTIONS:
+  isolation_level: "read committed"
+EDXAPP_MYSQL_ATOMIC_REQUESTS: True
+EDXAPP_MYSQL_REPLICA_DB_NAME: "{{ EDXAPP_MYSQL_DB_NAME }}"
+EDXAPP_MYSQL_REPLICA_USER: "{{ EDXAPP_MYSQL_USER }}"
+EDXAPP_MYSQL_REPLICA_PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}"
+EDXAPP_MYSQL_REPLICA_HOST: "{{ EDXAPP_MYSQL_HOST }}"
+EDXAPP_MYSQL_REPLICA_PORT: "{{ EDXAPP_MYSQL_PORT }}"
+EDXAPP_MYSQL_REPLICA_OPTIONS: "{{ EDXAPP_MYSQL_OPTIONS }}"
+EDXAPP_MYSQL_CSMH_DB_NAME: "edxapp_csmh"
+EDXAPP_MYSQL_CSMH_USER: "{{ EDXAPP_MYSQL_USER }}"
+EDXAPP_MYSQL_CSMH_PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}"
+EDXAPP_MYSQL_CSMH_HOST: "{{ EDXAPP_MYSQL_HOST }}"
+EDXAPP_MYSQL_CSMH_PORT: "{{ EDXAPP_MYSQL_PORT }}"
+EDXAPP_MYSQL_CSMH_OPTIONS: "{{ EDXAPP_MYSQL_OPTIONS }}"
+
+# This is Django's default https://docs.djangoproject.com/en/1.8/ref/settings/#conn-max-age
+EDXAPP_MYSQL_CONN_MAX_AGE: 0
+
 EDXAPP_MYSQL_HOST: 'localhost'
 EDXAPP_MYSQL_PORT: '3306'
+EDXAPP_SEARCH_HOST: 'localhost'
+EDXAPP_SEARCH_PORT: 9200
+EDXAPP_SEARCH_USE_SSL: false
+
+# list of dictionaries of the format
+# { 'host': 'hostname', 'port': 'portnumber', 'other config such as use_ssl': 'True' }
+# http://elasticsearch-py.readthedocs.org/en/master/api.html#elasticsearch
+EDXAPP_ELASTIC_SEARCH_CONFIG:
+  - host: "{{ EDXAPP_SEARCH_HOST }}"
+    port: "{{ EDXAPP_SEARCH_PORT }}"
+    use_ssl: "{{ EDXAPP_SEARCH_USE_SSL }}"
+
+EDXAPP_SETTINGS: '{{ COMMON_EDXAPP_SETTINGS }}'
+
+EDXAPP_LMS_ENV: 'lms.envs.{{ EDXAPP_SETTINGS }}'
+EDXAPP_CMS_ENV: 'cms.envs.{{ EDXAPP_SETTINGS }}'
+
 EDXAPP_EMAIL_BACKEND: 'django.core.mail.backends.smtp.EmailBackend'
+EDXAPP_EMAIL_HOST: 'localhost'
+EDXAPP_EMAIL_PORT: 25
+EDXAPP_EMAIL_USE_TLS: False
+EDXAPP_EMAIL_HOST_USER: ''
+EDXAPP_EMAIL_HOST_PASSWORD: ''
+
+EDXAPP_AWS_SES_REGION_NAME: "us-east-1"
+EDXAPP_AWS_SES_REGION_ENDPOINT: "email.us-east-1.amazonaws.com"
 EDXAPP_LOG_LEVEL: 'INFO'
 EDXAPP_MEMCACHE: [ 'localhost:11211' ]
+EDXAPP_CACHE_COURSE_STRUCTURE_MEMCACHE: "{{ EDXAPP_MEMCACHE }}"
+EDXAPP_CACHE_BACKEND: 'django.core.cache.backends.memcached.PyMemcacheCache'
 EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080'
+# EDXAPP_COMMENTS_SERVICE_KEY must match FORUM_API_KEY
 EDXAPP_COMMENTS_SERVICE_KEY: 'password'
-EDXAPP_EDXAPP_SECRET_KEY: ''
+EDXAPP_EDXAPP_SECRET_KEY: "DUMMY KEY CHANGE BEFORE GOING TO PRODUCTION"
+
+EDXAPP_FERNET_KEYS:
+  - "DUMMY KEY CHANGE BEFORE GOING TO PRODUCTION"
+
+EDXAPP_ENABLE_LTI_PROVIDER: false
+EDXAPP_LTI_USER_EMAIL_DOMAIN: "lti.example.com"
+# 900s, or 15 mins
+EDXAPP_LTI_AGGREGATE_SCORE_PASSBACK_DELAY: 900
+EDXAPP_PAYMENT_SUPPORT_EMAIL: "billing@example.com"
+EDXAPP_YOUTUBE_API_KEY: "PUT_YOUR_API_KEY_HERE"
+EDXAPP_ZENDESK_USER: ""
+EDXAPP_ZENDESK_URL: ""
+EDXAPP_ZENDESK_API_KEY: ""
+EDXAPP_ZENDESK_CUSTOM_FIELDS: {}
+EDXAPP_ZENDESK_OAUTH_ACCESS_TOKEN: ""
+EDXAPP_ZENDESK_GROUP_ID_MAPPING: {}
+EDXAPP_CELERY_USER: ''
+EDXAPP_CELERY_PASSWORD: ''
+EDXAPP_CELERY_BROKER_HOSTNAME: "{{ EDXAPP_REDIS_HOSTNAME }}"
+EDXAPP_CELERY_BROKER_TRANSPORT: 'redis'
+EDXAPP_CELERY_BROKER_VHOST: ""
+EDXAPP_CELERY_BROKER_USE_SSL: false
+EDXAPP_CELERY_EVENT_QUEUE_TTL: !!null
+EDXAPP_CELERY_TIMEZONE: "UTC"
+EDXAPP_CELERYBEAT_SCHEDULER: "celery.beat:PersistentScheduler"
"celery.beat:PersistentScheduler" +EDXAPP_CELERY_RESULT_BACKEND: "django-cache" +EDXAPP_ENABLE_CELERY_BEAT: false +EDXAPP_SINGLE_BEAT_LOCK_TIME: 60 +# EDXAPP_SINGLE_BEAT_HEARTBEAT_INTERVAL must be smaller than EDXAPP_SINGLE_BEAT_LOCK_TIME / 2 +EDXAPP_SINGLE_BEAT_HEARTBEAT_INTERVAL: 29 +EDXAPP_SINGLE_BEAT_REPO: "https://github.com/akachanov/single-beat.git" +EDXAPP_SINGLE_BEAT_VERSION: "e500ac4b56756cdf96836666883af8060aaef455" +EDXAPP_SINGLE_BEAT_USER: "{{ EDXAPP_CELERY_USER }}" +EDXAPP_SINGLE_BEAT_PASSWORD: "{{ EDXAPP_CELERY_PASSWORD }}" +EDXAPP_SINGLE_BEAT_IDENTIFIER: "celerybeat" +EDXAPP_BRANCH_IO_KEY: "" + +EDXAPP_AUTH_USE_OPENID_PROVIDER: true +EDXAPP_ENABLE_COMBINED_LOGIN_REGISTRATION: true +EDXAPP_ENABLE_COUNTRY_ACCESS: false +EDXAPP_ENABLE_CORS_HEADERS: false +EDXAPP_ENABLE_CROSS_DOMAIN_CSRF_COOKIE: false +EDXAPP_ENABLE_DISCUSSION_HOME_PANEL: true +EDXAPP_ENABLE_DISCUSSION_SERVICE: true +EDXAPP_ENABLE_GRADE_DOWNLOADS: true +EDXAPP_ENABLE_SPECIAL_EXAMS: false +EDXAPP_ENABLE_VIDEO_UPLOAD_PIPELINE: false + +EDXAPP_VIDEO_CDN_URLS: + EXAMPLE_COUNTRY_CODE: "http://example.com/edx/video?s3_url=" + +EDXAPP_CREDIT_HELP_LINK_URL: "" + +EDXAPP_PARTNER_SUPPORT_EMAIL: '' + +EDXAPP_PLATFORM_NAME: 'Your Platform Name Here' +EDXAPP_PLATFORM_DESCRIPTION: 'Your Platform Description Here' +EDXAPP_STUDIO_NAME: 'Studio' +EDXAPP_STUDIO_SHORT_NAME: 'Studio' +EDXAPP_ANALYTICS_DASHBOARD_NAME: "{{ EDXAPP_PLATFORM_NAME }} Insights" + +EDXAPP_LMS_ANALYTICS_API_KEY: '' +EDXAPP_LMS_ANALYTICS_API_URL: 'http://localhost:18100/' + +EDXAPP_CAS_SERVER_URL: "" +EDXAPP_CAS_EXTRA_LOGIN_PARAMS: "" +EDXAPP_CAS_ATTRIBUTE_CALLBACK: "" +EDXAPP_CAS_ATTRIBUTE_PACKAGE: "" +# Enable an end-point that creates a user and logs them in +# Used for performance testing +EDXAPP_ENABLE_AUTO_AUTH: false +# Settings for enabling and configuring third party authorization +EDXAPP_ENABLE_THIRD_PARTY_AUTH: true +EDXAPP_ENABLE_OAUTH2_PROVIDER: false +EDXAPP_THIRD_PARTY_AUTH_BACKENDS: + - social_core.backends.google.GoogleOAuth2 + - social_core.backends.linkedin.LinkedinOAuth2 + - social_core.backends.facebook.FacebookOAuth2 + - social_core.backends.azuread.AzureADOAuth2 + - common.djangoapps.third_party_auth.appleid.AppleIdAuth + - common.djangoapps.third_party_auth.identityserver3.IdentityServer3 + - common.djangoapps.third_party_auth.saml.SAMLAuthBackend + - common.djangoapps.third_party_auth.lti.LTIAuthBackend + +EDXAPP_CMS_SERVICE_NAME: 'edxapp-cms' +EDXAPP_CMS_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'edxapp-cms-sso-key' +EDXAPP_CMS_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'edxapp-cms-sso-secret' +EDXAPP_CMS_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'edxapp-cms-backend-service-key' +EDXAPP_CMS_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'edxapp-cms-backend-service-secret' + +EDXAPP_ENABLE_MOBILE_REST_API: false + +EDXAPP_ENABLE_BULK_ENROLLMENT_VIEW: false + +# Settings for API access management +EDXAPP_API_ACCESS_MANAGER_EMAIL: "api-access@example.com" +EDXAPP_API_ACCESS_FROM_EMAIL: "api-requests@example.com" +EDXAPP_API_DOCUMENTATION_URL: "http://course-catalog-api-guide.readthedocs.io/en/latest/" +EDXAPP_AUTH_DOCUMENTATION_URL: "http://course-catalog-api-guide.readthedocs.io/en/latest/authentication/index.html" + +# Settings for affiliate cookie tracking +EDXAPP_AFFILIATE_COOKIE_NAME: 'dev_affiliate_id' + +EDXAPP_ENABLE_EDXNOTES: false + +EDXAPP_ENABLE_CREDIT_ELIGIBILITY: false +EDXAPP_ENABLE_CREDIT_API: false + +EDXAPP_CUSTOM_COURSES_EDX: false + +EDXAPP_ENABLE_SYSADMIN_DASHBOARD: false +# This is different from lms/envs/common.py 
+# We're turning it on in config because it needs a lot more configuration
+# support, such as settings.DATABASES and initial database creation which is
+# handled in the play.
+EDXAPP_ENABLE_CSMH_EXTENDED: True
+EDXAPP_ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: True
+
+EDXAPP_GIT_REPO_DIR: '/edx/var/edxapp/course_repos'
+EDXAPP_GIT_REPO_EXPORT_DIR: '/edx/var/edxapp/export_course_repos'
+EDXAPP_ENABLE_EXPORT_GIT: false
+
+EDXAPP_FINANCIAL_REPORTS:
+  BUCKET: !!null
+  ROOT_PATH: "sandbox"
+  STORAGE_TYPE: "localfs"
-EDXAPP_OEE_URL: 'http://localhost:18060'
-EDXAPP_OEE_USER: 'lms'
-EDXAPP_OEE_PASSWORD: 'password'
+# Only the address should contain newlines
+EDXAPP_PDF_RECEIPT_BILLING_ADDRESS: |
+  Enter your receipt billing
+  address here.
-EDXAPP_ANALYTICS_API_KEY: ''
-EDXAPP_ZENDESK_USER: ''
-EDXAPP_ZENDESK_API_KEY: ''
-EDXAPP_CELERY_USER: 'celery'
-EDXAPP_CELERY_PASSWORD: 'celery'
+EDXAPP_PDF_RECEIPT_DISCLAIMER_TEXT: >
+  ENTER YOUR RECEIPT DISCLAIMER TEXT HERE.
-EDXAPP_PLATFORM_NAME: 'edX'
+EDXAPP_PDF_RECEIPT_FOOTER_TEXT: >
+  Enter your receipt footer text here.
-EDXAPP_FEATURES:
-  AUTH_USE_OPENID_PROVIDER: true
-  CERTIFICATES_ENABLED: true
-  ENABLE_DISCUSSION_SERVICE: true
-  ENABLE_INSTRUCTOR_ANALYTICS: true
-  SUBDOMAIN_BRANDING: false
-  SUBDOMAIN_COURSE_LISTINGS: false
-  PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE
-  ENABLE_S3_GRADE_DOWNLOADS: true
-  USE_CUSTOM_THEME: $edxapp_use_custom_theme
+EDXAPP_PDF_RECEIPT_TERMS_AND_CONDITIONS: >
+  Enter your receipt terms and conditions here.
+
+EDXAPP_PDF_RECEIPT_TAX_ID: "00-0000000"
+EDXAPP_PDF_RECEIPT_TAX_ID_LABEL: "fake Tax ID"
+EDXAPP_PDF_RECEIPT_COBRAND_LOGO_PATH: ""
+EDXAPP_PDF_RECEIPT_LOGO_PATH: ""
+
+EDXAPP_SOCIAL_AUTH_OAUTH_SECRETS: ""
+
+EDXAPP_ACE_CHANNEL_SAILTHRU_API_KEY: ""
+EDXAPP_ACE_CHANNEL_SAILTHRU_API_SECRET: ""
+EDXAPP_ACE_ENABLED_CHANNELS: ['django_email']
+EDXAPP_ACE_ENABLED_POLICIES: ['bulk_email_optout']
+EDXAPP_ACE_CHANNEL_SAILTHRU_DEBUG: True
+EDXAPP_ACE_CHANNEL_SAILTHRU_TEMPLATE_NAME: !!null
+EDXAPP_ACE_ROUTING_KEY: 'edx.lms.core.default'
+EDXAPP_ACE_CHANNEL_DEFAULT_EMAIL: 'django_email'
+EDXAPP_ACE_CHANNEL_TRANSACTIONAL_EMAIL: 'django_email'
+
+EDXAPP_ORGANIZATIONS_AUTOCREATE: true
+
+# Display a language selector in the LMS/CMS header.
+EDXAPP_SHOW_HEADER_LANGUAGE_SELECTOR: false
+
+# Display a language selector in the LMS footer.
+EDXAPP_SHOW_FOOTER_LANGUAGE_SELECTOR: false + +# Configure x_frame_options in LMS/CMS +EDXAPP_X_FRAME_OPTIONS: "DENY" + +# Reset functionality for master's integration environments +EDXAPP_ENABLE_ENROLLMENT_RESET: false + +EDXAPP_FEATURES_DEFAULT: + AUTH_USE_OPENID_PROVIDER: "{{ EDXAPP_AUTH_USE_OPENID_PROVIDER }}" + ENABLE_DISCUSSION_SERVICE: "{{ EDXAPP_ENABLE_DISCUSSION_SERVICE }}" + PREVIEW_LMS_BASE: "{{ EDXAPP_PREVIEW_LMS_BASE }}" + ENABLE_GRADE_DOWNLOADS: "{{ EDXAPP_ENABLE_GRADE_DOWNLOADS }}" + ENABLE_MKTG_SITE: "{{ EDXAPP_ENABLE_MKTG_SITE }}" + ENABLE_PUBLISHER: "{{ EDXAPP_ENABLE_PUBLISHER }}" + AUTOMATIC_AUTH_FOR_TESTING: "{{ EDXAPP_ENABLE_AUTO_AUTH }}" + ENABLE_THIRD_PARTY_AUTH: "{{ EDXAPP_ENABLE_THIRD_PARTY_AUTH }}" + ENABLE_BULK_ENROLLMENT_VIEW: "{{ EDXAPP_ENABLE_BULK_ENROLLMENT_VIEW }}" + ENABLE_VIDEO_UPLOAD_PIPELINE: "{{ EDXAPP_ENABLE_VIDEO_UPLOAD_PIPELINE }}" + ENABLE_DISCUSSION_HOME_PANEL: "{{ EDXAPP_ENABLE_DISCUSSION_HOME_PANEL }}" + ENABLE_COMBINED_LOGIN_REGISTRATION: "{{ EDXAPP_ENABLE_COMBINED_LOGIN_REGISTRATION }}" + ENABLE_CORS_HEADERS: "{{ EDXAPP_ENABLE_CORS_HEADERS }}" + ENABLE_CROSS_DOMAIN_CSRF_COOKIE: "{{ EDXAPP_ENABLE_CROSS_DOMAIN_CSRF_COOKIE }}" + ENABLE_COUNTRY_ACCESS: "{{ EDXAPP_ENABLE_COUNTRY_ACCESS }}" + ENABLE_EDXNOTES: "{{ EDXAPP_ENABLE_EDXNOTES }}" + ENABLE_CREDIT_API: "{{ EDXAPP_ENABLE_CREDIT_API }}" + ENABLE_CREDIT_ELIGIBILITY: "{{ EDXAPP_ENABLE_CREDIT_ELIGIBILITY }}" + ENABLE_LTI_PROVIDER: "{{ EDXAPP_ENABLE_LTI_PROVIDER }}" + ENABLE_SPECIAL_EXAMS: "{{ EDXAPP_ENABLE_SPECIAL_EXAMS }}" + ENABLE_OAUTH2_PROVIDER: "{{ EDXAPP_ENABLE_OAUTH2_PROVIDER }}" + ENABLE_SYSADMIN_DASHBOARD: "{{ EDXAPP_ENABLE_SYSADMIN_DASHBOARD }}" + ENABLE_MOBILE_REST_API: "{{ EDXAPP_ENABLE_MOBILE_REST_API }}" + CUSTOM_COURSES_EDX: "{{ EDXAPP_CUSTOM_COURSES_EDX }}" + ENABLE_CSMH_EXTENDED: "{{ EDXAPP_ENABLE_CSMH_EXTENDED }}" + ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: "{{ EDXAPP_ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES }}" + SHOW_HEADER_LANGUAGE_SELECTOR: "{{ EDXAPP_SHOW_HEADER_LANGUAGE_SELECTOR }}" + SHOW_FOOTER_LANGUAGE_SELECTOR: "{{ EDXAPP_SHOW_FOOTER_LANGUAGE_SELECTOR }}" + ENABLE_ENROLLMENT_RESET: "{{ EDXAPP_ENABLE_ENROLLMENT_RESET }}" + ENABLE_EXPORT_GIT: "{{ EDXAPP_ENABLE_EXPORT_GIT }}" + +EDXAPP_FEATURES_EXTRA: {} + +EDXAPP_FEATURES: "{{ EDXAPP_FEATURES_DEFAULT | combine(EDXAPP_FEATURES_EXTRA) }}" -EDXAPP_BOOK_URL: '' # This needs to be set to localhost # if xqueue is run on the same server # as the lms (it's sent in the request) EDXAPP_SITE_NAME: 'localhost' -EDXAPP_MEDIA_URL: '' -EDXAPP_ANALYTICS_SERVER_URL: '' -EDXAPP_FEEDBACK_SUBMISSION_EMAIL: '' -EDXAPP_CELERY_BROKER_HOSTNAME: '' +EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}" +EDXAPP_LMS_HTTPS: 'on' +EDXAPP_CMS_SITE_NAME: 'localhost' +EDXAPP_MEDIA_URL: "/media" +EDXAPP_FEEDBACK_SUBMISSION_EMAIL: "" EDXAPP_LOGGING_ENV: 'sandbox' -EDXAPP_SYSLOG_SERVER: '' +EDXAPP_SYSLOG_SERVER: "" EDXAPP_RABBIT_HOSTNAME: 'localhost' -EDXAPP_XML_MAPPINGS: {} +EDXAPP_REDIS_HOSTNAME: '{{ EDXAPP_RABBIT_HOSTNAME }}' EDXAPP_LMS_NGINX_PORT: 18000 EDXAPP_LMS_SSL_NGINX_PORT: 48000 -EDXAPP_LMS_PREVIEW_NGINX_PORT: 18020 EDXAPP_CMS_NGINX_PORT: 18010 EDXAPP_CMS_SSL_NGINX_PORT: 48010 +# NGINX Rate limiting related vars +EDXAPP_ENABLE_RATE_LIMITING: false +EDXAPP_COURSES_REQUEST_RATE: '5r/s' +EDXAPP_COURSES_REQUEST_BURST_RATE: 10 +EDXAPP_COURSES_USER_AGENT_BURST_RATE: 5 +EDXAPP_RATE_LIMITED_USER_AGENTS: [] + +# Consumed in the edxapp.yml playbook +# to pass to nginx as nginx_skip_enable_sites +EDXAPP_NGINX_SKIP_ENABLE_SITES: False + +# If the LMS and 
Studio run on the same machine / nginx, it makes sense to have
+# a default (usually the LMS). If you run on separate machines, you'll want to mark
+# them both as defaults.
+EDXAPP_NGINX_DEFAULT_SITES:
+  - 'lms'
+
 EDXAPP_LANG: 'en_US.UTF-8'
+EDXAPP_LANGUAGE_CODE: 'en'
+EDXAPP_LANGUAGE_COOKIE: 'openedx-language-preference'
+EDXAPP_CERTIFICATE_TEMPLATE_LANGUAGES:
+  'en': 'English'
+  'es': 'Español'
 EDXAPP_TIME_ZONE: 'America/New_York'
+EDXAPP_HELP_TOKENS_BOOKS:
+  learner: "http://edx.readthedocs.io/projects/open-edx-learner-guide"
+  course_author: "http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course"
+
 EDXAPP_TECH_SUPPORT_EMAIL: 'technical@example.com'
 EDXAPP_CONTACT_EMAIL: 'info@example.com'
 EDXAPP_BUGS_EMAIL: 'bugs@example.com'
 EDXAPP_DEFAULT_FROM_EMAIL: 'registration@example.com'
 EDXAPP_DEFAULT_FEEDBACK_EMAIL: 'feedback@example.com'
-EDXAPP_DEFAULT_SERVER_EMAIL: 'devops@example.com'
+EDXAPP_DEFAULT_SERVER_EMAIL: 'sre@example.com'
 EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL: 'no-reply@example.com'
+EDXAPP_BULK_EMAIL_LOG_SENT_EMAILS: false
+EDXAPP_UNIVERSITY_EMAIL: 'university@example.com'
+EDXAPP_PRESS_EMAIL: 'press@example.com'
+EDXAPP_LMS_ROOT_URL: "{{ EDXAPP_LMS_BASE_SCHEME | default('https') }}://{{ EDXAPP_LMS_BASE }}"
+EDXAPP_LMS_INTERNAL_ROOT_URL: "{{ EDXAPP_LMS_ROOT_URL }}"
+
+EDXAPP_LMS_ISSUER: "{{ COMMON_JWT_ISSUER }}"
+EDXAPP_JWT_EXPIRATION: 30 # Number of seconds until expiration
+EDXAPP_JWT_AUDIENCE: "{{ COMMON_JWT_AUDIENCE }}"
+EDXAPP_JWT_SECRET_KEY: "{{ COMMON_JWT_SECRET_KEY }}"
+EDXAPP_JWT_PUBLIC_SIGNING_JWK_SET: "{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}"
+EDXAPP_JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}'
+EDXAPP_JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}'
+
+# See https://github.com/openedx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0008-use-asymmetric-jwts.rst
+EDXAPP_JWT_SIGNING_ALGORITHM: !!null
+EDXAPP_JWT_PRIVATE_SIGNING_JWK: !!null
+
+EDXAPP_PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount'
+EDXAPP_PLATFORM_FACEBOOK_ACCOUNT: 'http://www.facebook.com/YourPlatformFacebookAccount'
+EDXAPP_FACEBOOK_APP_ID: "FACEBOOK_APP_ID"
+EDXAPP_FACEBOOK_APP_SECRET: "FACEBOOK_APP_SECRET"
+EDXAPP_FACEBOOK_API_VERSION: "v2.1"
+
+EDXAPP_CONTACT_MAILING_ADDRESS: 'SET-ME-PLEASE'
+
+EDXAPP_SOCIAL_MEDIA_FOOTER_URLS: {}
+EDXAPP_MOBILE_STORE_URLS: {}
+EDXAPP_FOOTER_ORGANIZATION_IMAGE: "images/logo.png"
 EDXAPP_ENV_EXTRA: {}
 EDXAPP_AUTH_EXTRA: {}
+EDXAPP_LMS_ENV_EXTRA: "{{ EDXAPP_ENV_EXTRA }}"
+EDXAPP_CMS_ENV_EXTRA: "{{ EDXAPP_ENV_EXTRA }}"
+EDXAPP_LMS_AUTH_EXTRA: "{{ EDXAPP_AUTH_EXTRA }}"
+EDXAPP_CMS_AUTH_EXTRA: "{{ EDXAPP_AUTH_EXTRA }}"
+EDXAPP_ENABLE_MKTG_SITE: false
+EDXAPP_ENABLE_PUBLISHER: false
 EDXAPP_MKTG_URL_LINK_MAP: {}
-# Set this sets the url for static files
+EDXAPP_MKTG_URLS: {}
+EDXAPP_SUPPORT_SITE_LINK: ''
+EDXAPP_ID_VERIFICATION_SUPPORT_LINK: ''
+EDXAPP_ACTIVATION_EMAIL_SUPPORT_LINK: ''
+EDXAPP_PASSWORD_RESET_SUPPORT_LINK: ''
+EDXAPP_EDXMKTG_USER_INFO_COOKIE_NAME: "edx-user-info"
+
+# Settings for Grade downloads
+EDXAPP_GRADE_STORAGE_CLASS: 'django.core.files.storage.FileSystemStorage'
+EDXAPP_GRADE_STORAGE_KWARGS:
+  location: /tmp/edx-s3/grades
+
+# These set the url for static files
 # Override this var to use a CDN
-# Example: xxxxx.cloudfront.net/static/
+# Example: http://xxxxx.cloudfront.net/static/
+
+# Default variable, likely to be retired since LMS and CMS are being separated
 EDXAPP_STATIC_URL_BASE: "/static/"
+# If you would like LMS to use a different CDN or path
+EDXAPP_LMS_STATIC_URL_BASE: "{{ EDXAPP_STATIC_URL_BASE }}"
+# If you would like Studio to use a different CDN or path
+EDXAPP_CMS_STATIC_URL_BASE: "{{ EDXAPP_STATIC_URL_BASE }}"
 
-# Settings for Grade downloads
-EDXAPP_GRADE_STORAGE_TYPE: 'localfs'
-EDXAPP_GRADE_BUCKET: 'edx-grades'
-EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
+# does not affect verified students
+EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$']
 
-# Configure rake tasks in edx-platform to skip Python/Ruby/Node installation
+# Configure paver tasks in edx-platform to skip Python/Ruby/Node installation
 EDXAPP_NO_PREREQ_INSTALL: 1
 
 # whether to setup the python codejail or not
-EDXAPP_PYTHON_SANDBOX: false
+EDXAPP_PYTHON_SANDBOX: true
 # this next setting, if true, turns on actual sandbox enforcement. If not true,
 # it puts the sandbox in 'complain' mode, for reporting but not enforcement
 EDXAPP_SANDBOX_ENFORCE: true
 
-# Supply authorized keys used for remote management via the automated
-# role, see meta/main.yml. Ensure you know what this does before
-# enabling. The boolean flag determines whether the role is included.
-# This is done to make it possible to disable remote access easily by
-# setting the flag to true and providing an empty array.
-EDXAPP_INCLUDE_AUTOMATOR_ROLE: false
-EDXAPP_AUTOMATOR_AUTHORIZED_KEYS: []
+# Vars that are used when the automated role is "mixed-in" via the deploying play
+# This data structure specifies all the users with access to run commands remotely
+# over SSH and the fully qualified command that they can run via sudo to the
+# application user
+
+EDXAPP_AUTOMATED_USERS: {}
+  # automated_user:
+  #   sudo_commands:
+  #     - command: "{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py lms showmigrations --settings={{ edxapp_settings }}"
+  #       sudo_user: "edxapp"
+  #   authorized_keys:
+  #     - "SSH authorized key"
 
 EDXAPP_USE_GIT_IDENTITY: false
-# Example: "{{ secure_dir }}/files/git-identity"
-EDXAPP_LOCAL_GIT_IDENTITY: !!null
+# Paste the contents of the git identity
+# into this var
+EDXAPP_GIT_IDENTITY: !!null
+
+EDXAPP_UPDATE_STATIC_FILES_KEY: false
+# Set this to true if you want to install the private pip
+# requirements in the edx-platform repo.
+# This will use EDXAPP_GIT_IDENTITY; EDXAPP_USE_GIT_IDENTITY
+# must be set to true if EDXAPP_INSTALL_PRIVATE_REQUIREMENTS is
+# set to true
+
+EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
+
+# List of additional python packages that should be installed into the
+# edxapp virtual environment.
+# `name` (required), `version` (optional), and `extra_args` (optional)
+# are supported and correspond to the options of ansible's pip module.
+# Example:
+# EDXAPP_EXTRA_REQUIREMENTS:
+#   - name: mypackage
+#     version: 1.0.1
+#   - name: git+https://git.myproject.org/MyProject#egg=MyProject
+EDXAPP_EXTRA_REQUIREMENTS: []
+
+# List of private requirements that should be installed into the
+# edxapp virtual environment.
+# `name` (required), 'extra_args' (optional)
+# Example:
+# EDXAPP_PRIVATE_REQUIREMENTS:
+#   - name: git+https://git.myproject.org/MyProject#egg=MyProject
+# Note: This list contains edx.org specific dependencies, even though this is
+# a public repo. The plan is to phase this out along with the rest of this
+# repo as part of the DEPR https://github.com/openedx/public-engineering/issues/51.
+EDXAPP_PRIVATE_REQUIREMENTS:
+  # For Harvard courses:
+  - name: xblock-problem-builder==5.1.3
+  # Oppia XBlock
+  - name: git+https://github.com/oppia/xblock.git@1030adb3590ad2d32c93443cc8690db0985d76b6#egg=oppia-xblock
+    extra_args: -e
+  # This repository contains schoolyourself-xblock, which is used in
+  # edX's "AlgebraX" and "GeometryX" courses.
+  - name: git+https://github.com/openedx/schoolyourself-xblock.git@2093048720cfb36cc05b3143cd6f2585c7c64d85#egg=schoolyourself-xblock
+    extra_args: -e
+  # Prototype XBlocks from edX learning sciences limited roll-outs and user testing.
+  # Concept XBlock, in particular, is nowhere near finished and an early prototype.
+  # Profile XBlock is there so we can play with XBlock arguments in the platform, but isn't ready for use outside of
+  # edX.
+  - name: git+https://github.com/openedx/ConceptXBlock.git@75dd86e5fa4c54ab2f04c95c4fd3389ac1f56174#egg=concept-xblock
+    extra_args: -e
+  - name: git+https://github.com/openedx/AudioXBlock.git@20538c6e9bb704801a71ecbb6981f794556dfc45#egg=audio-xblock
+    extra_args: -e
+  - name: git+https://github.com/openedx/AnimationXBlock.git@c950ffdda2f69effda93bf03df8646f61d3ffada#egg=animation-xblock
+    extra_args: -e
+  # Peer instruction XBlock
+  - name: git+https://github.com/ubc/ubcpi.git@1.0.0#egg=ubcpi-xblock
+    extra_args: -e
+  # Vector Drawing and ActiveTable XBlocks (Davidson)
+  - name: git+https://github.com/open-craft/xblock-vectordraw.git@0b931ae5d6314dbda5b58ab6c865aea1bc121267#egg=vectordraw-xblock==0.3.0
+    extra_args: -e
+  - name: git+https://github.com/open-craft/xblock-activetable.git@d3fb772435c382b59293e4e688a6a3096c4f6fd7#egg=activetable-xblock
+    extra_args: -e
+  # Stanford-developed XBlocks (technically unsupported, but here to ease migration of courses from Lagunita)
+  - name: xblock-qualtrics-survey==1.3.0
+  - name: git+https://github.com/openedx/xblock-in-video-quiz.git@a703acd9ef82434fc7ca2bc230496f45a584bb9a#egg=invideoquiz-xblock
+    extra_args: -e
+  - name: git+https://github.com/openedx/xblock-free-text-response@83a389e0a4b0a464e5d1e4a4a201678aed5eee9a#egg=xblock-free-text-response
+    extra_args: -e
+  - name: xblock-submit-and-compare==1.2.0
+  - name: xblock-sql-grader==0.4.0
+  - name: openedx-xblock-image-modal==3.1.0
+  # XBlocks associated with the LabXchange project
+  - name: git+https://github.com/open-craft/labxchange-xblocks.git@a0a8a8dad13199014d4bb29cee416289880bde0b#egg=labxchange-xblocks
+    extra_args: -e
+  # Caliper and xAPI event routing plugin
+  - name: edx-event-routing-backends==5.5.6
+
+# List of custom middleware that should be used in edxapp to process
+# incoming HTTP requests. Should be a list of plain strings that fully
+# qualify Python classes or functions that can be used as Django middleware.
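+# A minimal example override; the dotted class path below is hypothetical
+# and not shipped with edx-platform:
+# EDXAPP_EXTRA_MIDDLEWARE_CLASSES:
+#   - 'my_plugin.middleware.RequestLoggingMiddleware'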
+EDXAPP_EXTRA_MIDDLEWARE_CLASSES: [] + +EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: !!null +EDXAPP_GOOGLE_ANALYTICS_LINKEDIN: "" +EDXAPP_GOOGLE_ANALYTICS_TRACKING_ID: "" +EDXAPP_GOOGLE_SITE_VERIFICATION_ID: "" + +EDXAPP_OPTIMIZELY_PROJECT_ID: !!null +EDXAPP_TRACKING_SEGMENTIO_WEBHOOK_SECRET: "" +EDXAPP_CMS_SEGMENT_KEY: !!null +EDXAPP_LMS_SEGMENT_KEY: !!null +EDXAPP_EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: [] + +EDXAPP_EDX_API_KEY: "PUT_YOUR_API_KEY_HERE" +# This is the default set in common.py +EDXAPP_VERIFY_STUDENT: + DAYS_GOOD_FOR: 365 + EXPIRING_SOON_WINDOW: 28 + +EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS: {} +EDXAPP_BULK_EMAIL_EMAILS_PER_TASK: 500 +# Instructor code that will not be run in the code sandbox +EDXAPP_COURSES_WITH_UNSAFE_CODE: [] +EDXAPP_SESSION_COOKIE_DOMAIN: "" +EDXAPP_SESSION_COOKIE_NAME: "sessionid" + +# django-session-cookie middleware +EDXAPP_DCS_SESSION_COOKIE_SAMESITE: "{{ 'None' if NGINX_ENABLE_SSL | default(False) else 'Lax' }}" +EDXAPP_DCS_SESSION_COOKIE_SAMESITE_FORCE_ALL: True + +# Whether to run reindex_course on deploy +EDXAPP_REINDEX_ALL_COURSES: false + +# Whether to run compilejsi18n on deploy +EDXAPP_COMPILE_JSI18N: false + +# XML Course related flags +EDXAPP_XML_FROM_GIT: false +EDXAPP_XML_S3_BUCKET: !!null +EDXAPP_XML_S3_KEY: !!null + +EDXAPP_NEWRELIC_LMS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-lms" +EDXAPP_NEWRELIC_CMS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-cms" +EDXAPP_NEWRELIC_WORKERS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-workers" +EDXAPP_LMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +EDXAPP_CMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +EDXAPP_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +EDXAPP_ORA2_FILE_PREFIX: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/ora2' +EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME: '{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}' +EDXAPP_FILE_UPLOAD_STORAGE_PREFIX: 'submissions_attachments' + +EDXAPP_CODE_JAIL_LIMITS: + # Limit the memory of the jailed process to something high but not + # infinite (512MiB in bytes) + VMEM: 536870912 + # Time in seconds that the jailed process has to run. 
+  REALTIME: 3
+  # Needs to be non-zero so that jailed code can use it as its temp directory. (1MiB in bytes)
+  FSIZE: 1048576
+  CPU: 1
+  PROXY: 0
+
+# Set the number of workers explicitly for lms and cms
+# Should be set to
+# EDXAPP_WORKERS:
+#   lms:
+#   cms:
+EDXAPP_WORKERS: !!null
+# Dashboard URL, assumes that the insights role is installed locally
+EDXAPP_ANALYTICS_DASHBOARD_URL: "http://localhost:18110/courses"
+
+EDXAPP_REGISTRATION_EXTRA_FIELDS:
+  confirm_email: "hidden"
+  level_of_education: "optional"
+  gender: "optional"
+  year_of_birth: "optional"
+  mailing_address: "hidden"
+  goals: "optional"
+  honor_code: "required"
+  terms_of_service: "hidden"
+  city: "hidden"
+  country: "required"
+
+EDXAPP_CELERY_WORKERS:
+  - queue: default
+    service_variant: cms
+    concurrency: 1
+    monitor: True
+    prefetch_optimization: default
+  - queue: high
+    service_variant: cms
+    concurrency: 1
+    monitor: True
+    prefetch_optimization: default
+  - queue: default
+    service_variant: lms
+    concurrency: 1
+    monitor: True
+    prefetch_optimization: default
+  - queue: high
+    service_variant: lms
+    concurrency: 1
+    monitor: True
+    prefetch_optimization: default
+  - queue: high_mem
+    service_variant: lms
+    concurrency: 1
+    monitor: False
+    max_tasks_per_child: 1
+    prefetch_optimization: default
+EDXAPP_RECALCULATE_GRADES_ROUTING_KEY: 'edx.lms.core.default'
+EDXAPP_POLICY_CHANGE_GRADES_ROUTING_KEY: 'edx.lms.core.default'
+EDXAPP_SINGLE_LEARNER_COURSE_REGRADE_ROUTING_KEY: 'edx.lms.core.default'
+EDXAPP_BULK_EMAIL_ROUTING_KEY_SMALL_JOBS: 'edx.lms.core.default'
+EDXAPP_PROGRAM_CERTIFICATES_ROUTING_KEY: 'edx.lms.core.default'
+EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '^(.*)$', 'edx.lms.core.\\1')|list }}"
+EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '^(.*)$', 'edx.cms.core.\\1')|list }}"
+
+EDXAPP_DEFAULT_CACHE_VERSION: "1"
+EDXAPP_OAUTH_ENFORCE_SECURE: True
+EDXAPP_OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS: 365
+EDXAPP_OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS: 30
+# This turns on deletion of access tokens, refresh tokens, and grants when consumed (not bulk deletions)
+EDXAPP_OAUTH_DELETE_EXPIRED: True
+
+# Unused directory for edxapp application yaml configuration files
+EDXAPP_CFG_DIR: "{{ COMMON_CFG_DIR }}/edxapp"
+EDXAPP_DEPRECATED_ADVANCED_COMPONENT_TYPES: []
+
+# Video Pipeline Settings
+EDXAPP_VIDEO_UPLOAD_PIPELINE:
+  BUCKET: ''
+  ROOT_PATH: ''
+
+EDXAPP_CORS_ORIGIN_WHITELIST: []
+EDXAPP_CORS_ORIGIN_ALLOW_ALL: false
+EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: ""
+EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_NAME: ""
+EDXAPP_CSRF_COOKIE_SECURE: false
+EDXAPP_CSRF_TRUSTED_ORIGINS: []
+EDXAPP_LOGIN_REDIRECT_WHITELIST: []
+
+# edx-rbac Related Settings
+EDXAPP_SYSTEM_WIDE_ROLE_CLASSES: []
+
+# Setting for enterprise marketing footer query params
+EDXAPP_ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: {}
+
+# Setting for limiting API calls for integrated channel transmission.
+# The setting key maps to the channel code (e.g. 'SAP' for success factors). Channel code is defined as
+# part of the django model of each integrated channel in edx-enterprise.
+# The absence of a key/value pair translates to NO LIMIT on the number of "chunks" transmitted per cycle.
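+# Example adding a second per-channel cap (the second channel code and its
+# value are illustrative only, not a shipped default):
+# EDXAPP_INTEGRATED_CHANNELS_API_CHUNK_TRANSMISSION_LIMIT:
+#   SAP: 1
+#   DEGREED: 10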
+EDXAPP_INTEGRATED_CHANNELS_API_CHUNK_TRANSMISSION_LIMIT:
+  SAP: 1
+
+# E-Commerce Related Settings
+EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT: "http://localhost:8002/"
+EDXAPP_ECOMMERCE_API_URL: "http://localhost:8002/api/v2"
+# TODO (CCB) Remove this after all references in edx/edx-platform have been removed.
+EDXAPP_ECOMMERCE_API_SIGNING_KEY: "{{ EDXAPP_JWT_SECRET_KEY }}"
+EDXAPP_COURSE_CATALOG_URL_ROOT: "http://localhost:8008/"
+EDXAPP_COURSE_CATALOG_API_URL: "http://localhost:8008/api/v1"
+EDXAPP_CREDENTIALS_INTERNAL_SERVICE_URL: "http://localhost:8005/"
+EDXAPP_CREDENTIALS_PUBLIC_SERVICE_URL: "http://localhost:8005/"
+
+# Learner portal settings
+EDXAPP_LEARNER_PORTAL_URL_ROOT: "https://learner-portal-{{ EDXAPP_LMS_BASE }}"
+# Blockstore Related Settings
+EDXAPP_BLOCKSTORE_PUBLIC_URL_ROOT: "http://localhost:18250/"
+EDXAPP_BLOCKSTORE_API_URL: "http://localhost:18250/api/v1"
+
+# List of all logout URIs for IDAs which have been converted from using DOP to using DOT.
+EDXAPP_IDA_LOGOUT_URI_LIST: []
+
+# which access.py permission name to check in order to determine if a course about page is
+# visible. We default this to the legacy permission 'see_exists'.
+
+EDXAPP_COURSE_CATALOG_VISIBILITY_PERMISSION: 'see_exists'
+EDXAPP_COURSE_ABOUT_VISIBILITY_PERMISSION: 'see_exists'
+EDXAPP_DEFAULT_COURSE_VISIBILITY_IN_CATALOG: 'both'
+EDXAPP_DEFAULT_MOBILE_AVAILABLE: false
+
+# Mailchimp Settings
+EDXAPP_MAILCHIMP_NEW_USER_LIST_ID: null
+
+# Social Sharing Related Settings
+EDXAPP_SOCIAL_SHARING_SETTINGS:
+  CUSTOM_COURSE_URLS: false
+  DASHBOARD_FACEBOOK: false
+  CERTIFICATE_FACEBOOK: false
+  CERTIFICATE_TWITTER: false
+  DASHBOARD_TWITTER: false
+
+# To use AWS S3 as your backend, you need different kwargs:
+# EDXAPP_PROFILE_IMAGE_BACKEND_CONFIG:
+#   class: storages.backends.s3boto3.S3Boto3Storage
+#   options:
+#     location: path/to/images  # Note: The location should not begin with a leading slash.
+#     bucket: mybucket
+#     custom_domain: mybucket.s3.amazonaws.com
+#     access_key: XXXAWS_ACCESS_KEYXXX
+#     secret_key: XXXAWS_SECRET_KEYXXX
+#     headers:
+#       Cache-Control: max-age-{{ EDXAPP_PROFILE_IMAGE_MAX_AGE }}
+# NB: access_key and secret_key are unnecessary if you use IAM roles
+EDXAPP_PROFILE_IMAGE_BACKEND:
+  class: openedx.core.storage.OverwriteStorage
+  options:
+    location: "{{ edxapp_media_dir }}/profile-images/"
+    base_url: "{{ EDXAPP_MEDIA_URL }}/profile-images/"
+
+EDXAPP_PROFILE_IMAGE_MAX_AGE: 31536000
+
+# used to salt hashed usernames for profile image filenames
+EDXAPP_PROFILE_IMAGE_HASH_SEED: placeholder_secret_key
+
+# In bytes
+EDXAPP_PROFILE_IMAGE_MAX_BYTES: 1048576
+EDXAPP_PROFILE_IMAGE_MIN_BYTES: 100
+
+EDXAPP_PROFILE_IMAGE_SIZES_MAP:
+  full: 500
+  large: 120
+  medium: 50
+  small: 30
+
+EDXAPP_PARSE_KEYS: {}
+
+# In a production environment when using separate clusters, you'll
+# want to route requests differently from the LMS (internal api) and
+# from JS (public API)
+EDXAPP_EDXNOTES_PUBLIC_API: http://localhost:18120/api/v1
+EDXAPP_EDXNOTES_INTERNAL_API: http://localhost:18120/api/v1
+
+EDXAPP_XBLOCK_SETTINGS: {}
+
+# Secret keys shared with credit providers.
+# Used to digitally sign credit requests (us --> provider)
+# and validate responses (provider --> us).
+# Each key in the dictionary is a credit provider ID, and
+# the value is the 32-character key.
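+# Example (the provider ID and key are placeholders, not real credentials):
+# EDXAPP_CREDIT_PROVIDER_SECRET_KEYS:
+#   example-provider: "0123456789abcdef0123456789abcdef"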
+EDXAPP_CREDIT_PROVIDER_SECRET_KEYS: {}
+
+# Proctoring configuration (redirect URLs and keys shared between systems)
+EDXAPP_PROCTORING_SETTINGS: {}
+EDXAPP_PROCTORING_BACKENDS:
+  DEFAULT: "null"
+  # The null key needs to be quoted because
+  # null is a language independent type in YAML
+  "null": {}
+
+# Configuration needed for the retirement service
+EDXAPP_RETIREMENT_SERVICE_USER_EMAIL: "retirement_worker@example.com"
+EDXAPP_RETIREMENT_SERVICE_USER_NAME: "retirement_worker"
+EDXAPP_RETIRED_USERNAME_PREFIX: "retired__user_"
+EDXAPP_RETIRED_EMAIL_PREFIX: "retired__user_"
+EDXAPP_RETIRED_EMAIL_DOMAIN: "retired.invalid"
+EDXAPP_RETIRED_USER_SALTS:
+  - "OVERRIDE ME WITH A RANDOM VALUE"
+  - "ROTATE SALTS BY APPENDING NEW VALUES"
+# These get loaded into database models per environment via management command
+# These are the required states, environmental overrides are in edx-internal.
+EDXAPP_RETIREMENT_STATES:
+  - "PENDING"
+  - "RETIRING_FORUMS"
+  - "FORUMS_COMPLETE"
+  - "RETIRING_SALESFORCE_LEADS"
+  - "SALESFORCE_LEADS_COMPLETE"
+  - "RETIRING_SEGMENT"
+  - "SEGMENT_COMPLETE"
+  - "RETIRING_HUBSPOT"
+  - "HUBSPOT_COMPLETE"
+  - "RETIRING_BRAZE"
+  - "BRAZE_COMPLETE"
+  - "RETIRING_ENROLLMENTS"
+  - "ENROLLMENTS_COMPLETE"
+  - "RETIRING_NOTES"
+  - "NOTES_COMPLETE"
+  - "RETIRING_PROCTORING"
+  - "PROCTORING_COMPLETE"
+  - "RETIRING_DEMOGRAPHICS"
+  - "DEMOGRAPHICS_COMPLETE"
+  - "RETIRING_LICENSE_MANAGER"
+  - "LICENSE_MANAGER_COMPLETE"
+  - "RETIRING_LMS_MISC"
+  - "LMS_MISC_COMPLETE"
+  - "RETIRING_LMS"
+  - "LMS_COMPLETE"
+  - "ADDING_TO_PARTNER_QUEUE"
+  - "PARTNER_QUEUE_COMPLETE"
+  - "ERRORED"
+  - "ABORTED"
+  - "COMPLETE"
+
+EDXAPP_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME"
+
+# Comprehensive Theming
+# Deprecated, maintained for backward compatibility
+EDXAPP_COMPREHENSIVE_THEME_DIR: ""
+
+# list of paths to the comprehensive theme directories
+EDXAPP_COMPREHENSIVE_THEME_DIRS:
+  - "{{ EDXAPP_COMPREHENSIVE_THEME_DIR }}"
+
+# list of paths to the comprehensive theme locale directories
+EDXAPP_COMPREHENSIVE_THEME_LOCALE_PATHS: []
+
+# list of paths to locale directories to load first
+EDXAPP_PREPEND_LOCALE_PATHS: []
+
+# Name of the default site theme
+EDXAPP_DEFAULT_SITE_THEME: ""
+EDXAPP_ENABLE_COMPREHENSIVE_THEMING: false
+
+# Path to directory to load custom resource templates for the studio/lms.
+EDXAPP_CUSTOM_RESOURCE_TEMPLATES_DIRECTORY: null
+
+# Git repo for the comprehensive theme (if using a comprehensive theme
+# other than the ones bundled with edx/platform)
+EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO: ""
+# Git branch, tag, or revision to check out from
+# EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO
+EDXAPP_COMPREHENSIVE_THEME_VERSION: ""
+
+# SAML KEYS
+EDXAPP_SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: ''
+EDXAPP_SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: ''
+
+EDXAPP_SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT: {}
+EDXAPP_SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT: {}
+
+# Session cookie setting
+# Only set this to true for client side profiling, never for production
+EDXAPP_SESSION_SAVE_EVERY_REQUEST: false
-# Configuration for database migration
-EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ EDXAPP_MYSQL_DB_NAME }}"
+EDXAPP_SESSION_COOKIE_SECURE: false
+
+# Optionally add a cron job to run the "clearsessions" management command to delete expired sessions.
+# Hours and minutes follow cron syntax.
+# E.g. "15,19" hours and "0" minutes will run it at 15:00 and 19:00.
+# "*" hours and "5" minutes will run it every hour, at 5 minutes past the hour.
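+# Example override (values are illustrative) that purges expired sessions daily at 03:30:
+# EDXAPP_CLEARSESSIONS_CRON_ENABLED: true
+# EDXAPP_CLEARSESSIONS_CRON_HOURS: "3"
+# EDXAPP_CLEARSESSIONS_CRON_MINUTES: "30"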
+EDXAPP_CLEARSESSIONS_CRON_ENABLED: false
+EDXAPP_CLEARSESSIONS_CRON_HOURS: "14"
+EDXAPP_CLEARSESSIONS_CRON_MINUTES: "0"
+
+# Add additional cron jobs from the given list.
+# See ansible docs for valid options for these items:
+# https://docs.ansible.com/ansible/latest/modules/cron_module.html
+EDXAPP_ADDITIONAL_CRON_JOBS: []
+
+EDXAPP_VIDEO_IMAGE_MAX_AGE: 31536000
+
+# This is django storage configuration for Video Image settings.
+# You can configure S3 or Swift in lms/envs/common.py
+EDXAPP_VIDEO_IMAGE_SETTINGS:
+  VIDEO_IMAGE_MAX_BYTES: 2097152
+  VIDEO_IMAGE_MIN_BYTES: 2048
+  STORAGE_KWARGS:
+    location: "{{ edxapp_video_storage_location }}/"
+    base_url: "{{ EDXAPP_MEDIA_URL }}/"
+  DIRECTORY_PREFIX: 'video-images/'
+
+EDXAPP_VIDEO_TRANSCRIPTS_MAX_AGE: 31536000
+
+# This is django storage configuration for Video Transcripts settings.
+EDXAPP_VIDEO_TRANSCRIPTS_SETTINGS:
+  VIDEO_TRANSCRIPTS_MAX_BYTES: 3145728
+  STORAGE_KWARGS:
+    location: "{{ edxapp_video_storage_location }}/"
+    base_url: "{{ EDXAPP_MEDIA_URL }}/"
+  DIRECTORY_PREFIX: 'video-transcripts/'
+
+# Course Block Structures
+EDXAPP_BLOCK_STRUCTURES_SETTINGS:
+  # Delay, in seconds, after a new edit of a course is published
+  # before updating the block structures cache. This is needed
+  # for a better chance at getting the latest changes when there
+  # are secondary reads in sharded mongoDB clusters. See TNL-5041
+  # for more info.
+  COURSE_PUBLISH_TASK_DELAY: 30
+
+  # Delay, in seconds, between retry attempts if a task fails.
+  TASK_DEFAULT_RETRY_DELAY: 30
+
+  # Maximum number of retries per task.
+  TASK_MAX_RETRIES: 5
+
+  PRUNING_ACTIVE: false
+
+# Configuration needed for LMS to communicate with the Discovery service
+DISCOVERY_SERVICE_USER_EMAIL: "discovery_worker@example.com"
+DISCOVERY_SERVICE_USER_NAME: "discovery_worker"
+
+# Configuration needed for LMS to communicate with the Ecommerce service
+ECOMMERCE_SERVICE_USER_EMAIL: "ecommerce_worker@example.com"
+ECOMMERCE_SERVICE_USER_NAME: "ecommerce_worker"
+
+# Configuration needed for LMS to communicate with the Studio service
+EDXAPP_CMS_SERVICE_USER_EMAIL: "edxapp_cms_worker@example.com"
+EDXAPP_CMS_SERVICE_USER_NAME: "edxapp_cms_worker"
+
+
+# Configuration needed for LMS to communicate with the Credentials service
+CREDENTIALS_SERVICE_USER_EMAIL: "credentials_worker@example.com"
+CREDENTIALS_SERVICE_USER_NAME: "credentials_worker"
+
+# Configuration needed for LMS to communicate with the Insights service
+INSIGHTS_SERVICE_USER_EMAIL: "insights_worker@example.com"
+INSIGHTS_SERVICE_USER_NAME: "insights_worker"
+
+# Configuration needed for LMS to communicate with the Registrar service
+REGISTRAR_SERVICE_USER_EMAIL: "registrar_worker@example.com"
+REGISTRAR_SERVICE_USER_NAME: "registrar_worker"
+
+# Configuration needed for LMS to communicate with the Designer service
+DESIGNER_SERVICE_USER_EMAIL: "designer_worker@example.com"
+DESIGNER_SERVICE_USER_NAME: "designer_worker"
+
+# Configuration needed for LMS to communicate with the License Manager service
+LICENSE_MANAGER_SERVICE_USER_EMAIL: "license_manager_worker@example.com"
+LICENSE_MANAGER_SERVICE_USER_NAME: "license_manager_worker"
+
+# Configuration needed for LMS to communicate with the Commerce Coordinator service
+COMMERCE_COORDINATOR_SERVICE_USER_EMAIL: "commerce_coordinator_worker@example.com"
+COMMERCE_COORDINATOR_SERVICE_USER_NAME: "commerce_coordinator_worker"
+
+ENTERPRISE_CATALOG_SERVICE_USER_EMAIL: "enterprise_catalog_worker@example.com"
+ENTERPRISE_CATALOG_SERVICE_USER_NAME: "enterprise_catalog_worker"
+ +# Configuration settings needed for the LMS to communicate with the Enterprise service. +EDXAPP_ENTERPRISE_API_URL: "{{ EDXAPP_LMS_INTERNAL_ROOT_URL }}/enterprise/api/v1" + +EDXAPP_ENTERPRISE_SERVICE_WORKER_EMAIL: "enterprise_worker@example.com" +EDXAPP_ENTERPRISE_SERVICE_WORKER_USERNAME: "enterprise_worker" + +EDXAPP_VEDA_SERVICE_CLIENT_NAME: "veda" +EDXAPP_VEDA_SERVICE_API_URL: "{{ EDXAPP_LMS_BASE_SCHEME | default('https') }}://veda-{{ EDXAPP_LMS_BASE }}/api/" +EDXAPP_VEDA_SERVICE_USER_EMAIL: "veda_service_user@example.com" +EDXAPP_VEDA_SERVICE_USER_NAME: "veda_service_user" + +EDXAPP_ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES: + - audit + - honor + +EDXAPP_ENTERPRISE_CUSTOMER_SUCCESS_EMAIL: "customersuccess@edx.org" +EDXAPP_ENTERPRISE_INTEGRATIONS_EMAIL: "enterprise-integrations@edx.org" + +EDXAPP_ENTERPRISE_ENROLLMENT_API_URL: "{{ EDXAPP_LMS_INTERNAL_ROOT_URL }}/api/enrollment/v1/" + +EDXAPP_ENTERPRISE_SUPPORT_URL: '' + +EDXAPP_ENTERPRISE_TAGLINE: '' + +# The assigned ICP license number for display in the platform footer +EDXAPP_ICP_LICENSE: !!null +EDXAPP_ICP_LICENSE_INFO: {} + +# Base Cookie Domain to share cookie across edx domains +EDXAPP_BASE_COOKIE_DOMAIN: "{{ EDXAPP_LMS_SITE_NAME }}" + +# Account password configuration +EDXAPP_AUTH_PASSWORD_VALIDATORS: + - NAME: 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' + - NAME: 'common.djangoapps.util.password_policy_validators.MinimumLengthValidator' + OPTIONS: + min_length: 2 + - NAME: 'common.djangoapps.util.password_policy_validators.MaximumLengthValidator' + OPTIONS: + max_length: 75 + +# The age at which a learner no longer requires parental consent, or None +EDXAPP_PARENTAL_CONSENT_AGE_LIMIT: 13 + +# Scorm Xblock configurations +EDXAPP_SCORM_PKG_STORAGE_DIR: !!null +EDXAPP_SCORM_PLAYER_LOCAL_STORAGE_ROOT: !!null + +# maintenance banner message (only actually enabled via waffle switch) +EDXAPP_MAINTENANCE_BANNER_TEXT: "Sample banner message" + +EDXAPP_PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: + ENFORCE_COMPLIANCE_ON_LOGIN: false + +# Needed to link the LMS instructor dashboard to the writable gradebook micro-frontend +EDXAPP_LMS_WRITABLE_GRADEBOOK_URL: null + +# Needed to link to the new profile micro-frontend. +EDXAPP_PROFILE_MICROFRONTEND_URL: null + +# Needed to link to the new order history micro-frontend. +EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL: null + +# Needed to link to the new account micro-frontend. +EDXAPP_ACCOUNT_MICROFRONTEND_URL: null + +# Needed to link to the program console micro-frontend. +EDXAPP_PROGRAM_CONSOLE_MICROFRONTEND_URL: null + +# Needed to link to the learning micro-frontend. 
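+# Example (the URL shape is illustrative only):
+# EDXAPP_LEARNING_MICROFRONTEND_URL: "https://learning.example.com"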
+EDXAPP_LEARNING_MICROFRONTEND_URL: null + +# Needed to link to the course-authoring micro-frontend +EDXAPP_COURSE_AUTHORING_MICROFRONTEND_URL: null + +# Remote config +EDXAPP_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +EDXAPP_ENABLE_LEGACY_JSON_CONFIGS: false +EDXAPP_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +EDXAPP_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# Dashboard courses default limit +EDXAPP_DASHBOARD_COURSE_LIMIT: null + +# Completion Aggregator URL needed for progress bar +EDXAPP_COMPLETION_AGGREGATOR_URL: null #-------- Everything below this line is internal to the role ------------ @@ -145,273 +1113,611 @@ EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_t edxapp_data_dir: "{{ COMMON_DATA_DIR }}/edxapp" edxapp_app_dir: "{{ COMMON_APP_DIR }}/edxapp" -edxapp_log_dir: "{{ COMMON_LOG_DIR }}/edxapp" +edxapp_log_dir: "{{ COMMON_LOG_DIR }}/edx" edxapp_venvs_dir: "{{ edxapp_app_dir }}/venvs" edxapp_venv_dir: "{{ edxapp_venvs_dir }}/edxapp" edxapp_venv_bin: "{{ edxapp_venv_dir }}/bin" -edxapp_rbenv_dir: "{{ edxapp_app_dir }}" -edxapp_rbenv_root: "{{ edxapp_rbenv_dir }}/.rbenv" -edxapp_rbenv_shims: "{{ edxapp_rbenv_root }}/shims" -edxapp_rbenv_bin: "{{ edxapp_rbenv_root }}/bin" -edxapp_gem_root: "{{ edxapp_rbenv_dir }}/.gem" -edxapp_gem_bin: "{{ edxapp_gem_root }}/bin" +edxapp_nodeenv_dir: "{{ edxapp_app_dir }}/nodeenvs/edxapp" +edxapp_nodeenv_bin: "{{ edxapp_nodeenv_dir }}/bin" +edxapp_npm_dir: "{{ edxapp_app_dir }}/.npm" +edxapp_npm_bin: "{{ edxapp_npm_dir }}/bin" +edxapp_settings: '{{ EDXAPP_SETTINGS }}' +EDXAPP_NODE_VERSION: "18" +EDXAPP_NPM_VERSION: "10.5.1" +# This is where node installs modules, not node itself +edxapp_node_bin: "{{ edxapp_code_dir }}/node_modules/.bin" edxapp_user: edxapp -edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_rbenv_bin }}:{{ edxapp_rbenv_shims }}:{{ edxapp_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +edxapp_user_createhome: 'no' +edxapp_user_shell: '/bin/false' +edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_npm_bin }}:{{ edxapp_node_bin }}:{{ edxapp_nodeenv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles" +edxapp_media_dir: "{{ edxapp_data_dir }}/media" +edxapp_media_dir_s3: "{{ edxapp_media_dir | regex_replace('^\\/', '') }}" +edxapp_video_storage_location: "{% if EDXAPP_DEFAULT_FILE_STORAGE == 'django.core.files.storage.FileSystemStorage' %}{{ edxapp_media_dir }}{% else %}{{ edxapp_media_dir_s3 }}{% endif %}/" +edxapp_course_static_dir: "{{ edxapp_data_dir }}/course_static" edxapp_course_data_dir: "{{ edxapp_data_dir }}/data" edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads" edxapp_theme_dir: "{{ edxapp_data_dir }}/themes" -edxapp_git_identity: "{{ edxapp_app_dir }}/{{ EDXAPP_LOCAL_GIT_IDENTITY|basename }}" +edxapp_git_identity: "{{ edxapp_app_dir }}/edxapp-git-identity" edxapp_git_ssh: "/tmp/edxapp_git_ssh.sh" -edxapp_pypi_local_mirror: "http://localhost:{{ devpi_port }}/root/pypi/+simple" -edxapp_workers: - - queue: low - service_variant: cms - concurrency: 3 - - queue: default - service_variant: cms - concurrency: 4 - - queue: high - service_variant: cms - concurrency: 1 - - queue: low - service_variant: lms - concurrency: 1 - - queue: default - service_variant: lms - concurrency: 3 - - queue: high - service_variant: lms - concurrency: 4 - - queue: high_mem - service_variant: lms - concurrency: 2 
+edxapp_studio_cfg: "{{ COMMON_CFG_DIR }}/studio.yml"
+edxapp_lms_cfg: "{{ COMMON_CFG_DIR }}/lms.yml"
+edxapp_revision_cfg: "{{ COMMON_CFG_DIR }}/revisions.yml"
+
+edxapp_devstack_logs:
+  - "{{ supervisor_log_dir }}/cms-stdout.log"
+  - "{{ supervisor_log_dir }}/cms-stderr.log"
+  - "{{ supervisor_log_dir }}/lms-stdout.log"
+  - "{{ supervisor_log_dir }}/lms-stderr.log"
+
+# Only install packages which are appropriate for this environment
+edxapp_npm_production: "yes"
+
+edxapp_workers: "{{ EDXAPP_CELERY_WORKERS }}"
+EDXAPP_WORKER_DEFAULT_STOPWAITSECS: 432000
 
 # setup for python codejail
 edxapp_sandbox_venv_dir: '{{ edxapp_venvs_dir }}/edxapp-sandbox'
 edxapp_sandbox_user: 'sandbox' # I think something about the codejail requires hardcoding this to sandbox:sandbox
+EDXAPP_SANDBOX_PYTHON_VERSION: "{{ EDXAPP_PYTHON_VERSION }}"
+edxapp_sandbox_python_version: "{{ EDXAPP_SANDBOX_PYTHON_VERSION }}"
 
 # apparmor command
 edxapp_aa_command: "{% if EDXAPP_SANDBOX_ENFORCE %}aa-enforce{% else %}aa-complain{% endif %}"
 
-# Requirement files we explicitely
-# check for changes before attempting
-# to update the venv
-edxapp_chksum_req_files:
-  - "{{ pre_requirements_file }}"
-  - "{{ post_requirements_file }}"
-  - "{{ base_requirements_file }}"
-  - "{{ sandbox_post_requirements }}"
-  - "{{ sandbox_base_requirements }}"
-
-# all edxapp requirements files
-edxapp_all_req_files:
-  - "{{ pre_requirements_file }}"
-  - "{{ post_requirements_file }}"
-  - "{{ base_requirements_file }}"
-  - "{{ repo_requirements_file }}"
-  - "{{ github_requirements_file }}"
-  - "{{ sandbox_post_requirements }}"
-  - "{{ sandbox_local_requirements }}"
-  - "{{ sandbox_base_requirements }}"
-
-
 # TODO: old style variable syntax is necessary
 # for lists and dictionaries
+edxapp_helper_scripts:
+  - edxapp-migrate
+  - edxapp-runserver
+  - edxapp-shell
 
-edxapp_environment:
-  LANG: $EDXAPP_LANG
-  NO_PREREQ_INSTALL: $EDXAPP_NO_PREREQ_INSTALL
+edxapp_environment_default:
+  LANG: "{{ EDXAPP_LANG }}"
+  NO_PREREQ_INSTALL: "{{ EDXAPP_NO_PREREQ_INSTALL }}"
   SKIP_WS_MIGRATIONS: 1
-  RBENV_ROOT: $edxapp_rbenv_root
-  GEM_HOME: $edxapp_gem_root
-  GEM_PATH: $edxapp_gem_root
-  PATH: $edxapp_deploy_path
+  PATH: "{{ edxapp_deploy_path }}"
+  # the settings module for edxapp, DJANGO_SETTINGS_MODULE
+  # should be set to {{SERVICE_VARIANT}}.{{EDXAPP_SETTINGS}}
+  # where SERVICE_VARIANT is lms or cms
+  EDX_PLATFORM_SETTINGS: "{{ EDXAPP_SETTINGS }}"
+  # Currently set to the app dir for json config; this should
+  # be updated to /edx/etc/edxapp when the switch to
+  # yaml based configs is complete
+  CONFIG_ROOT: "{{ edxapp_app_dir }}"
+  LMS_CFG: "{{ edxapp_lms_cfg }}"
+  STUDIO_CFG: "{{ edxapp_studio_cfg }}"
+  BOTO_CONFIG: "{{ edxapp_app_dir }}/.boto"
+  REVISION_CFG: "{{ edxapp_revision_cfg }}"
+  NODE_PATH: "{{ edxapp_npm_dir }}/lib/modules:/usr/lib/node_modules"
+  MANPATH: "{{ edxapp_npm_dir }}/share/man:$(manpath)"
+
+edxapp_environment_extra: {}
+
+edxapp_environment: "{{ edxapp_environment_default | combine(edxapp_environment_extra) }}"
+
+git_ssh_environment_mixin:
+  GIT_SSH: "{{ edxapp_git_ssh }}"
+
+edxapp_generic_contentstore_config: &edxapp_generic_default_contentstore
+  ENGINE: 'xmodule.contentstore.mongo.MongoContentStore'
+  #
+  # connection strings are duplicated temporarily for
+  # backward compatibility
+  #
+  OPTIONS:
+    db: "{{ EDXAPP_MONGO_DB_NAME }}"
+    host: "{{ EDXAPP_MONGO_HOSTS }}"
+    password: "{{ EDXAPP_MONGO_PASSWORD }}"
+    port: "{{ EDXAPP_MONGO_PORT }}"
+    user: "{{ EDXAPP_MONGO_USER }}"
+    ssl: "{{ EDXAPP_MONGO_USE_SSL }}"
+    auth_source: "{{
EDXAPP_MONGO_AUTH_DB }}" + +edxapp_generic_doc_store_config: &edxapp_generic_default_docstore + db: "{{ EDXAPP_MONGO_DB_NAME }}" + host: "{{ EDXAPP_MONGO_HOSTS }}" + replicaSet: "{{ EDXAPP_MONGO_REPLICA_SET }}" + password: "{{ EDXAPP_MONGO_PASSWORD }}" + port: "{{ EDXAPP_MONGO_PORT }}" + user: "{{ EDXAPP_MONGO_USER }}" + collection: 'modulestore' + ssl: "{{ EDXAPP_MONGO_USE_SSL }}" + # https://api.mongodb.com/python/2.9.1/api/pymongo/mongo_client.html#module-pymongo.mongo_client + socketTimeoutMS: 3000 # default is never timeout while the connection is open, this means it needs to explicitly close raising pymongo.errors.NetworkTimeout + connectTimeoutMS: 2000 # default is 20000, I believe raises pymongo.errors.ConnectionFailure + # Not setting waitQueueTimeoutMS and waitQueueMultiple since pymongo defaults to nobody being allowed to wait + authsource: "{{ EDXAPP_MONGO_AUTH_DB }}" + +EDXAPP_LMS_DRAFT_DOC_STORE_CONFIG: + <<: *edxapp_generic_default_docstore + read_preference: "{{ EDXAPP_LMS_DRAFT_DOC_STORE_READ_PREFERENCE }}" + +EDXAPP_LMS_SPLIT_DOC_STORE_CONFIG: + <<: *edxapp_generic_default_docstore + read_preference: "{{ EDXAPP_LMS_SPLIT_DOC_STORE_READ_PREFERENCE }}" + +EDXAPP_CMS_DOC_STORE_CONFIG: + <<: *edxapp_generic_default_docstore + read_preference: "{{ EDXAPP_MONGO_CMS_READ_PREFERENCE }}" +edxapp_databases: +# edxapp's edxapp-migrate scripts and the edxapp_migrate play +# will ensure that any DB not named read_replica will be migrated +# for both the lms and cms. + read_replica: + ENGINE: 'django.db.backends.mysql' + NAME: "{{ EDXAPP_MYSQL_REPLICA_DB_NAME }}" + USER: "{{ EDXAPP_MYSQL_REPLICA_USER }}" + PASSWORD: "{{ EDXAPP_MYSQL_REPLICA_PASSWORD }}" + HOST: "{{ EDXAPP_MYSQL_REPLICA_HOST }}" + PORT: "{{ EDXAPP_MYSQL_REPLICA_PORT }}" + CONN_MAX_AGE: "{{ EDXAPP_MYSQL_CONN_MAX_AGE }}" + OPTIONS: "{{ EDXAPP_MYSQL_REPLICA_OPTIONS }}" + default: + ENGINE: 'django.db.backends.mysql' + NAME: "{{ EDXAPP_MYSQL_DB_NAME }}" + USER: "{{ EDXAPP_MYSQL_USER }}" + PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}" + HOST: "{{ EDXAPP_MYSQL_HOST }}" + PORT: "{{ EDXAPP_MYSQL_PORT }}" + ATOMIC_REQUESTS: "{{ EDXAPP_MYSQL_ATOMIC_REQUESTS }}" + CONN_MAX_AGE: "{{ EDXAPP_MYSQL_CONN_MAX_AGE }}" + OPTIONS: "{{ EDXAPP_MYSQL_OPTIONS }}" + student_module_history: + ENGINE: 'django.db.backends.mysql' + NAME: "{{ EDXAPP_MYSQL_CSMH_DB_NAME }}" + USER: "{{ EDXAPP_MYSQL_CSMH_USER }}" + PASSWORD: "{{ EDXAPP_MYSQL_CSMH_PASSWORD }}" + HOST: "{{ EDXAPP_MYSQL_CSMH_HOST }}" + PORT: "{{ EDXAPP_MYSQL_CSMH_PORT }}" + CONN_MAX_AGE: "{{ EDXAPP_MYSQL_CONN_MAX_AGE }}" + OPTIONS: "{{ EDXAPP_MYSQL_CSMH_OPTIONS }}" edxapp_generic_auth_config: &edxapp_generic_auth - AWS_ACCESS_KEY_ID: $EDXAPP_AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY: $EDXAPP_AWS_SECRET_ACCESS_KEY - SECRET_KEY: $EDXAPP_EDXAPP_SECRET_KEY + EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: "{{ EDXAPP_EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST }}" + ECOMMERCE_API_SIGNING_KEY: "{{ EDXAPP_ECOMMERCE_API_SIGNING_KEY }}" + DEFAULT_FILE_STORAGE: "{{ EDXAPP_DEFAULT_FILE_STORAGE }}" + AWS_ACCESS_KEY_ID: "{{ EDXAPP_AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "{{ EDXAPP_AWS_SECRET_ACCESS_KEY }}" + AWS_QUERYSTRING_AUTH: "{{ EDXAPP_AWS_QUERYSTRING_AUTH }}" + AWS_STORAGE_BUCKET_NAME: "{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}" + AWS_S3_CUSTOM_DOMAIN: "{{ EDXAPP_AWS_S3_CUSTOM_DOMAIN }}" + SWIFT_USERNAME: "{{ EDXAPP_SWIFT_USERNAME }}" + SWIFT_KEY: "{{ EDXAPP_SWIFT_KEY }}" + SWIFT_TENANT_ID: "{{ EDXAPP_SWIFT_TENANT_ID }}" + SWIFT_TENANT_NAME: "{{ EDXAPP_SWIFT_TENANT_NAME }}" + SWIFT_AUTH_URL: "{{ 
EDXAPP_SWIFT_AUTH_URL }}" + SWIFT_AUTH_VERSION: "{{ EDXAPP_SWIFT_AUTH_VERSION }}" + SWIFT_REGION_NAME: "{{ EDXAPP_SWIFT_REGION_NAME }}" + SWIFT_USE_TEMP_URLS: "{{ EDXAPP_SWIFT_USE_TEMP_URLS }}" + SWIFT_TEMP_URL_KEY: "{{ EDXAPP_SWIFT_TEMP_URL_KEY }}" + SWIFT_TEMP_URL_DURATION: "{{ EDXAPP_SWIFT_TEMP_URL_DURATION }}" + SECRET_KEY: "{{ EDXAPP_EDXAPP_SECRET_KEY }}" XQUEUE_INTERFACE: - basic_auth: $EDXAPP_XQUEUE_BASIC_AUTH - django_auth: $EDXAPP_XQUEUE_DJANGO_AUTH - url: $EDXAPP_XQUEUE_URL - DOC_STORE_CONFIG: &edxapp_generic_default_docstore - db: $EDXAPP_MONGO_DB_NAME - host: $EDXAPP_MONGO_HOSTS - password: $EDXAPP_MONGO_PASSWORD - port: $EDXAPP_MONGO_PORT - user: $EDXAPP_MONGO_USER - collection: 'modulestore' - CONTENTSTORE: - ENGINE: 'xmodule.contentstore.mongo.MongoContentStore' - # - # connection strings are duplicated temporarily for - # backward compatibility - # - OPTIONS: - db: $EDXAPP_MONGO_DB_NAME - host: $EDXAPP_MONGO_HOSTS - password: $EDXAPP_MONGO_PASSWORD - port: $EDXAPP_MONGO_PORT - user: $EDXAPP_MONGO_USER - DOC_STORE_CONFIG: *edxapp_generic_default_docstore - MODULESTORE: - default: &edxapp_generic_default_modulestore - ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore' - OPTIONS: &generic_modulestore_default_options - collection: 'modulestore' - db: $EDXAPP_MONGO_DB_NAME - default_class: 'xmodule.hidden_module.HiddenDescriptor' - fs_root: $edxapp_course_data_dir - host: $EDXAPP_MONGO_HOSTS - password: $EDXAPP_MONGO_PASSWORD - port: $EDXAPP_MONGO_PORT - render_template: 'edxmako.shortcuts.render_to_string' - # Needed for the CMS to be able to run update_templates - user: $EDXAPP_MONGO_USER - DOC_STORE_CONFIG: *edxapp_generic_default_docstore - direct: &edxapp_generic_direct_modulestore - ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore' - OPTIONS: *generic_modulestore_default_options - DOC_STORE_CONFIG: *edxapp_generic_default_docstore - DATABASES: - default: - ENGINE: 'django.db.backends.mysql' - NAME: $EDXAPP_MYSQL_DB_NAME - USER: $EDXAPP_MYSQL_USER - PASSWORD: $EDXAPP_MYSQL_PASSWORD - HOST: $EDXAPP_MYSQL_HOST - PORT: $EDXAPP_MYSQL_PORT - OPEN_ENDED_GRADING_INTERFACE: - url: $EDXAPP_OEE_URL - password: $EDXAPP_OEE_PASSWORD - peer_grading: 'peer_grading' - staff_grading: 'staff_grading' - grading_controller: 'grading_controller' - username: $EDXAPP_OEE_USER - ANALYTICS_API_KEY: $EDXAPP_ANALYTICS_API_KEY - ZENDESK_USER: $EDXAPP_ZENDESK_USER - ZENDESK_API_KEY: $EDXAPP_ZENDESK_API_KEY - CELERY_BROKER_USER: $EDXAPP_CELERY_USER - CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD + basic_auth: "{{ EDXAPP_XQUEUE_BASIC_AUTH }}" + django_auth: "{{ EDXAPP_XQUEUE_DJANGO_AUTH }}" + url: "{{ EDXAPP_XQUEUE_URL }}" + DATABASES: "{{ edxapp_databases }}" + EMAIL_HOST_USER: "{{ EDXAPP_EMAIL_HOST_USER }}" + EMAIL_HOST_PASSWORD: "{{ EDXAPP_EMAIL_HOST_PASSWORD }}" + YOUTUBE_API_KEY: "{{ EDXAPP_YOUTUBE_API_KEY }}" + ZENDESK_USER: "{{ EDXAPP_ZENDESK_USER }}" + ZENDESK_API_KEY: "{{ EDXAPP_ZENDESK_API_KEY }}" + ZENDESK_OAUTH_ACCESS_TOKEN: "{{ EDXAPP_ZENDESK_OAUTH_ACCESS_TOKEN }}" + ZENDESK_GROUP_ID_MAPPING: "{{ EDXAPP_ZENDESK_GROUP_ID_MAPPING }}" + CELERY_BROKER_USER: "{{ EDXAPP_CELERY_USER }}" + CELERY_BROKER_PASSWORD: "{{ EDXAPP_CELERY_PASSWORD }}" + CELERY_TIMEZONE: "{{ EDXAPP_CELERY_TIMEZONE }}" + GOOGLE_ANALYTICS_ACCOUNT: "{{ EDXAPP_GOOGLE_ANALYTICS_ACCOUNT }}" + DJFS: "{{ EDXAPP_DJFS }}" + CREDIT_PROVIDER_SECRET_KEYS: "{{ EDXAPP_CREDIT_PROVIDER_SECRET_KEYS }}" + SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: "{{ EDXAPP_SOCIAL_AUTH_SAML_SP_PRIVATE_KEY }}" + SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: "{{ 
EDXAPP_SOCIAL_AUTH_SAML_SP_PUBLIC_CERT }}" + SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT: "{{ EDXAPP_SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT }}" + SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT: "{{ EDXAPP_SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT }}" + FACEBOOK_APP_ID: "{{ EDXAPP_FACEBOOK_APP_ID }}" + FACEBOOK_APP_SECRET: "{{ EDXAPP_FACEBOOK_APP_SECRET }}" + FACEBOOK_API_VERSION: "{{ EDXAPP_FACEBOOK_API_VERSION }}" + ENTERPRISE_SERVICE_WORKER_USERNAME: "{{ EDXAPP_ENTERPRISE_SERVICE_WORKER_USERNAME }}" + BRANCH_IO_KEY: "{{ EDXAPP_BRANCH_IO_KEY }}" + PROCTORING_BACKENDS: "{{ EDXAPP_PROCTORING_BACKENDS }}" + +generic_cache_config: &default_generic_cache + BACKEND: "{{ EDXAPP_CACHE_BACKEND }}" + KEY_FUNCTION: 'common.djangoapps.util.memcache.safe_key' + OPTIONS: + no_delay: true + ignore_exc: true + use_pooling: true + connect_timeout: 0.5 + +edxapp_revisions_config: + EDX_PLATFORM_REVISION: "{{ EDX_PLATFORM_VERSION }}" generic_env_config: &edxapp_generic_env + IDA_LOGOUT_URI_LIST: "{{ EDXAPP_IDA_LOGOUT_URI_LIST }}" + CREDENTIALS_INTERNAL_SERVICE_URL: "{{ EDXAPP_CREDENTIALS_INTERNAL_SERVICE_URL }}" + CREDENTIALS_PUBLIC_SERVICE_URL: "{{ EDXAPP_CREDENTIALS_PUBLIC_SERVICE_URL }}" + ECOMMERCE_PUBLIC_URL_ROOT: "{{ EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT }}" + ECOMMERCE_API_URL: "{{ EDXAPP_ECOMMERCE_API_URL }}" + BLOCKSTORE_PUBLIC_URL_ROOT: "{{ EDXAPP_BLOCKSTORE_PUBLIC_URL_ROOT }}" + BLOCKSTORE_API_URL: "{{ EDXAPP_BLOCKSTORE_API_URL }}" + LEARNER_PORTAL_URL_ROOT: "{{ EDXAPP_LEARNER_PORTAL_URL_ROOT }}" + EDX_PLATFORM_REVISION: "{{ EDX_PLATFORM_VERSION }}" + ENTERPRISE_API_URL: "{{ EDXAPP_ENTERPRISE_API_URL }}" + COURSE_CATALOG_URL_ROOT: "{{ EDXAPP_COURSE_CATALOG_URL_ROOT }}" + COURSE_CATALOG_API_URL: "{{ EDXAPP_COURSE_CATALOG_API_URL }}" + COURSE_CATALOG_VISIBILITY_PERMISSION: "{{ EDXAPP_COURSE_CATALOG_VISIBILITY_PERMISSION }}" + COURSE_ABOUT_VISIBILITY_PERMISSION: "{{ EDXAPP_COURSE_ABOUT_VISIBILITY_PERMISSION }}" + DEFAULT_COURSE_VISIBILITY_IN_CATALOG: "{{ EDXAPP_DEFAULT_COURSE_VISIBILITY_IN_CATALOG }}" + DEFAULT_MOBILE_AVAILABLE: "{{ EDXAPP_DEFAULT_MOBILE_AVAILABLE }}" + FINANCIAL_REPORTS: "{{ EDXAPP_FINANCIAL_REPORTS }}" + CORS_ORIGIN_WHITELIST: "{{ EDXAPP_CORS_ORIGIN_WHITELIST }}" + CORS_ORIGIN_ALLOW_ALL: "{{ EDXAPP_CORS_ORIGIN_ALLOW_ALL }}" + LOGIN_REDIRECT_WHITELIST: "{{ EDXAPP_LOGIN_REDIRECT_WHITELIST }}" + CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: "{{ EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_DOMAIN }}" + CROSS_DOMAIN_CSRF_COOKIE_NAME: "{{ EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_NAME }}" + CSRF_COOKIE_SECURE: "{{ EDXAPP_CSRF_COOKIE_SECURE }}" + CSRF_TRUSTED_ORIGINS: "{{ EDXAPP_CSRF_TRUSTED_ORIGINS }}" + VIDEO_UPLOAD_PIPELINE: "{{ EDXAPP_VIDEO_UPLOAD_PIPELINE }}" + DEPRECATED_ADVANCED_COMPONENT_TYPES: "{{ EDXAPP_DEPRECATED_ADVANCED_COMPONENT_TYPES }}" + XBLOCK_FS_STORAGE_BUCKET: "{{ EDXAPP_XBLOCK_FS_STORAGE_BUCKET }}" + XBLOCK_FS_STORAGE_PREFIX: "{{ EDXAPP_XBLOCK_FS_STORAGE_PREFIX }}" + ANALYTICS_DASHBOARD_URL: '{{ EDXAPP_ANALYTICS_DASHBOARD_URL }}' + CELERY_BROKER_VHOST: "{{ EDXAPP_CELERY_BROKER_VHOST }}" + CELERY_BROKER_USE_SSL: "{{ EDXAPP_CELERY_BROKER_USE_SSL }}" + CELERY_EVENT_QUEUE_TTL: "{{ EDXAPP_CELERY_EVENT_QUEUE_TTL }}" + CELERY_RESULT_BACKEND: "{{ EDXAPP_CELERY_RESULT_BACKEND }}" + PAYMENT_SUPPORT_EMAIL: "{{ EDXAPP_PAYMENT_SUPPORT_EMAIL }}" + ZENDESK_URL: "{{ EDXAPP_ZENDESK_URL }}" + ZENDESK_CUSTOM_FIELDS: "{{ EDXAPP_ZENDESK_CUSTOM_FIELDS }}" + COURSES_WITH_UNSAFE_CODE: "{{ EDXAPP_COURSES_WITH_UNSAFE_CODE }}" + BULK_EMAIL_EMAILS_PER_TASK: "{{ EDXAPP_BULK_EMAIL_EMAILS_PER_TASK }}" + DEFAULT_FILE_STORAGE: "{{ EDXAPP_DEFAULT_FILE_STORAGE }}" 
GRADES_DOWNLOAD: - STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE - BUCKET: $EDXAPP_GRADE_BUCKET - ROOT_PATH: $EDXAPP_GRADE_ROOT_PATH - STATIC_URL_BASE: $EDXAPP_STATIC_URL_BASE - STATIC_ROOT_BASE: $edxapp_staticfile_dir - LMS_BASE: $EDXAPP_LMS_BASE - CMS_BASE: $EDXAPP_CMS_BASE - BOOK_URL: $EDXAPP_BOOK_URL - PLATFORM_NAME: $EDXAPP_PLATFORM_NAME + STORAGE_CLASS: "{{ EDXAPP_GRADE_STORAGE_CLASS | default(None) }}" + STORAGE_KWARGS: "{{ EDXAPP_GRADE_STORAGE_KWARGS | default(None) }}" + STORAGE_TYPE: "{{ EDXAPP_GRADE_STORAGE_TYPE | default(None) }}" + BUCKET: "{{ EDXAPP_GRADE_BUCKET | default(None) }}" + ROOT_PATH: "{{ EDXAPP_GRADE_ROOT_PATH | default(None) }}" + STATIC_ROOT_BASE: "{{ edxapp_staticfile_dir }}" + LMS_BASE: "{{ EDXAPP_LMS_BASE }}" + CMS_BASE: "{{ EDXAPP_CMS_BASE }}" + LMS_ROOT_URL: "{{ EDXAPP_LMS_ROOT_URL }}" + LMS_INTERNAL_ROOT_URL: "{{ EDXAPP_LMS_INTERNAL_ROOT_URL }}" + PARTNER_SUPPORT_EMAIL: "{{ EDXAPP_PARTNER_SUPPORT_EMAIL }}" + PLATFORM_NAME: "{{ EDXAPP_PLATFORM_NAME }}" + PLATFORM_DESCRIPTION: "{{ EDXAPP_PLATFORM_DESCRIPTION }}" + ANALYTICS_DASHBOARD_NAME: "{{ EDXAPP_ANALYTICS_DASHBOARD_NAME }}" + STUDIO_NAME: "{{ EDXAPP_STUDIO_NAME }}" + STUDIO_SHORT_NAME: "{{ EDXAPP_STUDIO_SHORT_NAME }}" CERT_QUEUE: 'certificates' - LOCAL_LOGLEVEL: $EDXAPP_LOG_LEVEL + LOCAL_LOGLEVEL: "{{ EDXAPP_LOG_LEVEL }}" # default email backend set to local SMTP - EMAIL_BACKEND: $EDXAPP_EMAIL_BACKEND - FEATURES: $EDXAPP_FEATURES + EMAIL_BACKEND: "{{ EDXAPP_EMAIL_BACKEND }}" + EMAIL_HOST: "{{ EDXAPP_EMAIL_HOST }}" + EMAIL_PORT: "{{ EDXAPP_EMAIL_PORT }}" + EMAIL_USE_TLS: "{{ EDXAPP_EMAIL_USE_TLS }}" + AWS_SES_REGION_NAME: "{{ EDXAPP_AWS_SES_REGION_NAME }}" + AWS_SES_REGION_ENDPOINT: "{{ EDXAPP_AWS_SES_REGION_ENDPOINT }}" + FEATURES: "{{ EDXAPP_FEATURES }}" WIKI_ENABLED: true - SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER - SITE_NAME: $EDXAPP_SITE_NAME - LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/edx" - MEDIA_URL: $EDXAPP_MEDIA_URL - ANALYTICS_SERVER_URL: $EDXAPP_ANALYTICS_SERVER_URL - FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL - TIME_ZONE: $EDXAPP_TIME_ZONE - MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP + SYSLOG_SERVER: "{{ EDXAPP_SYSLOG_SERVER }}" + LOG_DIR: "{{ edxapp_log_dir }}" + DATA_DIR: "{{ edxapp_data_dir }}" + JWT_ISSUER: "{{ EDXAPP_LMS_ISSUER }}" + DEFAULT_JWT_ISSUER: + ISSUER: "{{ EDXAPP_LMS_ISSUER }}" + AUDIENCE: "{{ EDXAPP_JWT_AUDIENCE }}" + SECRET_KEY: "{{ EDXAPP_JWT_SECRET_KEY }}" + JWT_EXPIRATION: '{{ EDXAPP_JWT_EXPIRATION }}' + JWT_PRIVATE_SIGNING_KEY: !!null + JWT_AUTH: + JWT_ISSUER: "{{ EDXAPP_LMS_ISSUER }}" + JWT_AUDIENCE: "{{ EDXAPP_JWT_AUDIENCE }}" + JWT_SECRET_KEY: "{{ EDXAPP_JWT_SECRET_KEY }}" + JWT_ISSUERS: + - ISSUER: "{{ EDXAPP_LMS_ISSUER }}" + AUDIENCE: "{{ EDXAPP_JWT_AUDIENCE }}" + SECRET_KEY: "{{ EDXAPP_JWT_SECRET_KEY }}" + JWT_PUBLIC_SIGNING_JWK_SET: "{{ EDXAPP_JWT_PUBLIC_SIGNING_JWK_SET|string }}" + JWT_SIGNING_ALGORITHM: "{{ EDXAPP_JWT_SIGNING_ALGORITHM }}" + JWT_PRIVATE_SIGNING_JWK: "{{ EDXAPP_JWT_PRIVATE_SIGNING_JWK|string }}" + JWT_AUTH_COOKIE_HEADER_PAYLOAD: "{{ EDXAPP_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}" + JWT_AUTH_COOKIE_SIGNATURE: "{{ EDXAPP_JWT_AUTH_COOKIE_SIGNATURE }}" + + # edx-rbac Setting + SYSTEM_WIDE_ROLE_CLASSES: "{{ EDXAPP_SYSTEM_WIDE_ROLE_CLASSES }}" + + ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: "{{ EDXAPP_ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS }}" + INTEGRATED_CHANNELS_API_CHUNK_TRANSMISSION_LIMIT: "{{ EDXAPP_INTEGRATED_CHANNELS_API_CHUNK_TRANSMISSION_LIMIT }}" + + # must end in a slash (https://docs.djangoproject.com/en/1.4/ref/settings/#media-url) + 
MEDIA_URL: "{{ EDXAPP_MEDIA_URL }}/" + MEDIA_ROOT: "{{ edxapp_media_dir }}/" + + FEEDBACK_SUBMISSION_EMAIL: "{{ EDXAPP_FEEDBACK_SUBMISSION_EMAIL }}" + TIME_ZONE: "{{ EDXAPP_TIME_ZONE }}" + LANGUAGE_CODE: "{{ EDXAPP_LANGUAGE_CODE }}" + LANGUAGE_COOKIE: "{{ EDXAPP_LANGUAGE_COOKIE }}" + CERTIFICATE_TEMPLATE_LANGUAGES: "{{ EDXAPP_CERTIFICATE_TEMPLATE_LANGUAGES }}" + MKTG_URL_LINK_MAP: "{{ EDXAPP_MKTG_URL_LINK_MAP }}" + MKTG_URLS: "{{ EDXAPP_MKTG_URLS }}" + SUPPORT_SITE_LINK: "{{ EDXAPP_SUPPORT_SITE_LINK }}" + ID_VERIFICATION_SUPPORT_LINK: "{{ EDXAPP_ID_VERIFICATION_SUPPORT_LINK }}" + ACTIVATION_EMAIL_SUPPORT_LINK: "{{ EDXAPP_ACTIVATION_EMAIL_SUPPORT_LINK }}" + PASSWORD_RESET_SUPPORT_LINK: "{{ EDXAPP_PASSWORD_RESET_SUPPORT_LINK }}" + # repo root for courses - GITHUB_REPO_ROOT: $edxapp_course_data_dir + GITHUB_REPO_ROOT: "{{ edxapp_course_data_dir }}" CACHES: - default: &default_generic_cache - BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' - KEY_FUNCTION: 'util.memcache.safe_key' - KEY_PREFIX: 'sandbox_default' - LOCATION: $EDXAPP_MEMCACHE + default: + <<: *default_generic_cache + KEY_PREFIX: 'default' + LOCATION: "{{ EDXAPP_MEMCACHE }}" + VERSION: "{{ EDXAPP_DEFAULT_CACHE_VERSION }}" general: <<: *default_generic_cache - KEY_PREFIX: 'sandbox_general' + KEY_PREFIX: 'general' + LOCATION: "{{ EDXAPP_MEMCACHE }}" mongo_metadata_inheritance: <<: *default_generic_cache - KEY_PREFIX: 'integration_mongo_metadata_inheritance' + KEY_PREFIX: 'mongo_metadata_inheritance' + TIMEOUT: 300 + LOCATION: "{{ EDXAPP_MEMCACHE }}" staticfiles: <<: *default_generic_cache - KEY_PREFIX: 'integration_static_files' + KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general" + LOCATION: "{{ EDXAPP_MEMCACHE }}" + configuration: + <<: *default_generic_cache + KEY_PREFIX: "{{ ansible_hostname|default('configuration') }}" + LOCATION: "{{ EDXAPP_MEMCACHE }}" celery: <<: *default_generic_cache - KEY_PREFIX: 'integration_celery' - CELERY_BROKER_TRANSPORT: 'amqp' - CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME - COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL - LOGGING_ENV: $EDXAPP_LOGGING_ENV - SESSION_COOKIE_DOMAIN: !!null - COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY - SEGMENT_IO_LMS: true - THEME_NAME: $edxapp_theme_name - TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL - CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL - BUGS_EMAIL: $EDXAPP_BUGS_EMAIL + KEY_PREFIX: 'celery' + LOCATION: "{{ EDXAPP_MEMCACHE }}" + TIMEOUT: "7200" + course_structure_cache: + <<: *default_generic_cache + KEY_PREFIX: 'course_structure' + LOCATION: "{{ EDXAPP_CACHE_COURSE_STRUCTURE_MEMCACHE }}" + # Default to two hours + TIMEOUT: "7200" + CELERYBEAT_SCHEDULER: "{{ EDXAPP_CELERYBEAT_SCHEDULER }}" + CELERY_BROKER_TRANSPORT: "{{ EDXAPP_CELERY_BROKER_TRANSPORT }}" + CELERY_BROKER_HOSTNAME: "{{ EDXAPP_CELERY_BROKER_HOSTNAME }}" + COMMENTS_SERVICE_URL: "{{ EDXAPP_COMMENTS_SERVICE_URL }}" + LOGGING_ENV: "{{ EDXAPP_LOGGING_ENV }}" + SESSION_COOKIE_DOMAIN: "{{ EDXAPP_SESSION_COOKIE_DOMAIN }}" + SESSION_COOKIE_NAME: "{{ EDXAPP_SESSION_COOKIE_NAME }}" + COMMENTS_SERVICE_KEY: "{{ EDXAPP_COMMENTS_SERVICE_KEY }}" + TECH_SUPPORT_EMAIL: "{{ EDXAPP_TECH_SUPPORT_EMAIL }}" + CONTACT_EMAIL: "{{ EDXAPP_CONTACT_EMAIL }}" + BUGS_EMAIL: "{{ EDXAPP_BUGS_EMAIL }}" + DEFAULT_FROM_EMAIL: "{{ EDXAPP_DEFAULT_FROM_EMAIL }}" + DEFAULT_FEEDBACK_EMAIL: "{{ EDXAPP_DEFAULT_FEEDBACK_EMAIL }}" + SERVER_EMAIL: "{{ EDXAPP_DEFAULT_SERVER_EMAIL }}" + BULK_EMAIL_DEFAULT_FROM_EMAIL: "{{ EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL }}" + BULK_EMAIL_LOG_SENT_EMAILS: "{{ 
EDXAPP_BULK_EMAIL_LOG_SENT_EMAILS }}" + CAS_SERVER_URL: "{{ EDXAPP_CAS_SERVER_URL }}" + CAS_EXTRA_LOGIN_PARAMS: "{{ EDXAPP_CAS_EXTRA_LOGIN_PARAMS }}" + CAS_ATTRIBUTE_CALLBACK: "{{ EDXAPP_CAS_ATTRIBUTE_CALLBACK }}" + UNIVERSITY_EMAIL: "{{ EDXAPP_UNIVERSITY_EMAIL }}" + PRESS_EMAIL: "{{ EDXAPP_PRESS_EMAIL }}" + SOCIAL_MEDIA_FOOTER_URLS: "{{ EDXAPP_SOCIAL_MEDIA_FOOTER_URLS }}" + MOBILE_STORE_URLS: "{{ EDXAPP_MOBILE_STORE_URLS }}" + FOOTER_ORGANIZATION_IMAGE: "{{ EDXAPP_FOOTER_ORGANIZATION_IMAGE }}" + ORA2_FILE_PREFIX: "{{ EDXAPP_ORA2_FILE_PREFIX }}" + FILE_UPLOAD_STORAGE_BUCKET_NAME: "{{ EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME }}" + FILE_UPLOAD_STORAGE_PREFIX: "{{ EDXAPP_FILE_UPLOAD_STORAGE_PREFIX }}" + REGISTRATION_EXTRA_FIELDS: "{{ EDXAPP_REGISTRATION_EXTRA_FIELDS }}" + XBLOCK_SETTINGS: "{{ EDXAPP_XBLOCK_SETTINGS }}" + EDXMKTG_USER_INFO_COOKIE_NAME: "{{ EDXAPP_EDXMKTG_USER_INFO_COOKIE_NAME }}" + VIDEO_IMAGE_MAX_AGE: "{{ EDXAPP_VIDEO_IMAGE_MAX_AGE }}" + VIDEO_IMAGE_SETTINGS: "{{ EDXAPP_VIDEO_IMAGE_SETTINGS }}" + VIDEO_TRANSCRIPTS_MAX_AGE: "{{ EDXAPP_VIDEO_TRANSCRIPTS_MAX_AGE }}" + VIDEO_TRANSCRIPTS_SETTINGS: "{{ EDXAPP_VIDEO_TRANSCRIPTS_SETTINGS }}" + BLOCK_STRUCTURES_SETTINGS: "{{ EDXAPP_BLOCK_STRUCTURES_SETTINGS }}" + + COMPREHENSIVE_THEME_DIRS: "{{ EDXAPP_COMPREHENSIVE_THEME_DIRS }}" + COMPREHENSIVE_THEME_LOCALE_PATHS: "{{ EDXAPP_COMPREHENSIVE_THEME_LOCALE_PATHS }}" + PREPEND_LOCALE_PATHS: "{{ EDXAPP_PREPEND_LOCALE_PATHS }}" + ENABLE_COMPREHENSIVE_THEMING: "{{ EDXAPP_ENABLE_COMPREHENSIVE_THEMING }}" + CUSTOM_RESOURCE_TEMPLATES_DIRECTORY: "{{ EDXAPP_CUSTOM_RESOURCE_TEMPLATES_DIRECTORY }}" + DEFAULT_SITE_THEME: "{{ EDXAPP_DEFAULT_SITE_THEME }}" + SESSION_SAVE_EVERY_REQUEST: "{{ EDXAPP_SESSION_SAVE_EVERY_REQUEST }}" + SOCIAL_SHARING_SETTINGS: "{{ EDXAPP_SOCIAL_SHARING_SETTINGS }}" + SESSION_COOKIE_SECURE: "{{ EDXAPP_SESSION_COOKIE_SECURE }}" CODE_JAIL: - limits: - VMEM: 0 - REALTIME: 3 - DEFAULT_FROM_EMAIL: $EDXAPP_DEFAULT_FROM_EMAIL - DEFAULT_FEEDBACK_EMAIL: $EDXAPP_DEFAULT_FEEDBACK_EMAIL - SERVER_EMAIL: $EDXAPP_DEFAULT_SERVER_EMAIL - BULK_EMAIL_DEFAULT_FROM_EMAIL: $EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL + # from https://github.com/openedx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None + python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}' + limits: "{{ EDXAPP_CODE_JAIL_LIMITS }}" + user: '{{ edxapp_sandbox_user }}' + AFFILIATE_COOKIE_NAME: "{{ EDXAPP_AFFILIATE_COOKIE_NAME }}" + ELASTIC_SEARCH_CONFIG: "{{ EDXAPP_ELASTIC_SEARCH_CONFIG }}" + PLATFORM_TWITTER_ACCOUNT: "{{ EDXAPP_PLATFORM_TWITTER_ACCOUNT }}" + PLATFORM_FACEBOOK_ACCOUNT: "{{ EDXAPP_PLATFORM_FACEBOOK_ACCOUNT }}" + HELP_TOKENS_BOOKS: "{{ EDXAPP_HELP_TOKENS_BOOKS }}" + # License for serving content in China + ICP_LICENSE: "{{ EDXAPP_ICP_LICENSE }}" + ICP_LICENSE_INFO: "{{ EDXAPP_ICP_LICENSE_INFO }}" + # Base Cookie Domain to share cookie across edx domains + BASE_COOKIE_DOMAIN: "{{ EDXAPP_BASE_COOKIE_DOMAIN }}" + POLICY_CHANGE_GRADES_ROUTING_KEY: "{{ EDXAPP_POLICY_CHANGE_GRADES_ROUTING_KEY }}" + SINGLE_LEARNER_COURSE_REGRADE_ROUTING_KEY: "{{ EDXAPP_SINGLE_LEARNER_COURSE_REGRADE_ROUTING_KEY }}" + PROCTORING_SETTINGS: "{{ EDXAPP_PROCTORING_SETTINGS }}" + EXTRA_MIDDLEWARE_CLASSES: "{{ EDXAPP_EXTRA_MIDDLEWARE_CLASSES }}" + MAINTENANCE_BANNER_TEXT: "{{ EDXAPP_MAINTENANCE_BANNER_TEXT }}" + + RETIRED_USERNAME_PREFIX: "{{ EDXAPP_RETIRED_USERNAME_PREFIX }}" + RETIRED_EMAIL_PREFIX: "{{ EDXAPP_RETIRED_EMAIL_PREFIX }}" + RETIRED_EMAIL_DOMAIN: "{{ 
EDXAPP_RETIRED_EMAIL_DOMAIN }}" + RETIRED_USER_SALTS: "{{ EDXAPP_RETIRED_USER_SALTS }}" + RETIREMENT_SERVICE_WORKER_USERNAME: "{{ EDXAPP_RETIREMENT_SERVICE_USER_NAME }}" + RETIREMENT_STATES: "{{ EDXAPP_RETIREMENT_STATES }}" + + USERNAME_REPLACEMENT_WORKER: "{{ EDXAPP_USERNAME_REPLACEMENT_WORKER }}" + + AUTH_PASSWORD_VALIDATORS: "{{ EDXAPP_AUTH_PASSWORD_VALIDATORS }}" + PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: "{{ EDXAPP_PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG }}" + # Dashboard courses default limit + DASHBOARD_COURSE_LIMIT: "{{ EDXAPP_DASHBOARD_COURSE_LIMIT }}" + COMPLETION_AGGREGATOR_URL: "{{ EDXAPP_COMPLETION_AGGREGATOR_URL }}" lms_auth_config: <<: *edxapp_generic_auth + CONTENTSTORE: + <<: *edxapp_generic_default_contentstore + ADDITIONAL_OPTIONS: "{{ EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS }}" + DOC_STORE_CONFIG: "{{ EDXAPP_LMS_SPLIT_DOC_STORE_CONFIG }}" + DOC_STORE_CONFIG: "{{ EDXAPP_LMS_SPLIT_DOC_STORE_CONFIG }}" + SEGMENT_KEY: "{{ EDXAPP_LMS_SEGMENT_KEY }}" + OPTIMIZELY_PROJECT_ID: "{{ EDXAPP_OPTIMIZELY_PROJECT_ID }}" + EDX_API_KEY: "{{ EDXAPP_EDX_API_KEY }}" + VERIFY_STUDENT: "{{ EDXAPP_VERIFY_STUDENT }}" + GOOGLE_ANALYTICS_LINKEDIN: "{{ EDXAPP_GOOGLE_ANALYTICS_LINKEDIN }}" + GOOGLE_ANALYTICS_TRACKING_ID: "{{ EDXAPP_GOOGLE_ANALYTICS_TRACKING_ID }}" + TRACKING_SEGMENTIO_WEBHOOK_SECRET: "{{ EDXAPP_TRACKING_SEGMENTIO_WEBHOOK_SECRET }}" + PROFILE_IMAGE_HASH_SEED: "{{ EDXAPP_PROFILE_IMAGE_HASH_SEED }}" MODULESTORE: default: - ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore' - OPTIONS: - mappings: $EDXAPP_XML_MAPPINGS - stores: - xml: - ENGINE: 'xmodule.modulestore.xml.XMLModuleStore' - OPTIONS: - data_dir: $edxapp_course_data_dir - default_class: 'xmodule.hidden_module.HiddenDescriptor' - default: - OPTIONS: - default_class: 'xmodule.hidden_module.HiddenDescriptor' - host: $EDXAPP_MONGO_HOSTS - db: $EDXAPP_MONGO_DB_NAME - collection: 'modulestore' - render_template: 'edxmako.shortcuts.render_to_string' - user: $EDXAPP_MONGO_USER - password: $EDXAPP_MONGO_PASSWORD - port: $EDXAPP_MONGO_PORT - fs_root: $edxapp_course_data_dir - ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore' - DOC_STORE_CONFIG: *edxapp_generic_default_docstore + ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore' + OPTIONS: + mappings: {} + stores: + - NAME: 'split' + ENGINE: 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore' + DOC_STORE_CONFIG: "{{ EDXAPP_LMS_SPLIT_DOC_STORE_CONFIG }}" + OPTIONS: + default_class: 'xmodule.hidden_block.HiddenBlock' + fs_root: "{{ edxapp_course_data_dir }}" + render_template: 'common.djangoapps.edxmako.shortcuts.render_to_string' + - NAME: 'draft' + ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore' + DOC_STORE_CONFIG: "{{ EDXAPP_LMS_DRAFT_DOC_STORE_CONFIG }}" + OPTIONS: + default_class: 'xmodule.hidden_block.HiddenBlock' + fs_root: "{{ edxapp_course_data_dir }}" + render_template: 'common.djangoapps.edxmako.shortcuts.render_to_string' + SOCIAL_AUTH_OAUTH_SECRETS: "{{ EDXAPP_SOCIAL_AUTH_OAUTH_SECRETS }}" + ACE_CHANNEL_SAILTHRU_API_KEY: "{{ EDXAPP_ACE_CHANNEL_SAILTHRU_API_KEY }}" + ACE_CHANNEL_SAILTHRU_API_SECRET: "{{ EDXAPP_ACE_CHANNEL_SAILTHRU_API_SECRET }}" + ANALYTICS_API_KEY: "{{ EDXAPP_LMS_ANALYTICS_API_KEY }}" + FERNET_KEYS: "{{ EDXAPP_FERNET_KEYS }}" + lms_env_config: <<: *edxapp_generic_env - 'CODE_JAIL': - # from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None - 'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}' - 'limits': - 'VMEM': 0 - 
'REALTIME': 5 - 'user': '{{ edxapp_sandbox_user }}' + OAUTH_ENFORCE_SECURE: "{{ EDXAPP_OAUTH_ENFORCE_SECURE }}" + OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS: "{{ EDXAPP_OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS }}" + OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS: "{{ EDXAPP_OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS }}" + OAUTH_DELETE_EXPIRED: "{{ EDXAPP_OAUTH_DELETE_EXPIRED }}" + PAID_COURSE_REGISTRATION_CURRENCY: "{{ EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY }}" + THIRD_PARTY_AUTH_BACKENDS: "{{ EDXAPP_THIRD_PARTY_AUTH_BACKENDS }}" + GIT_REPO_DIR: "{{ EDXAPP_GIT_REPO_DIR }}" + SITE_NAME: "{{ EDXAPP_LMS_SITE_NAME }}" + HTTPS: "{{ EDXAPP_LMS_HTTPS }}" + VIDEO_CDN_URL: "{{ EDXAPP_VIDEO_CDN_URLS }}" + PDF_RECEIPT_TAX_ID: "{{ EDXAPP_PDF_RECEIPT_TAX_ID }}" + PDF_RECEIPT_FOOTER_TEXT: "{{ EDXAPP_PDF_RECEIPT_FOOTER_TEXT }}" + PDF_RECEIPT_DISCLAIMER_TEXT: "{{ EDXAPP_PDF_RECEIPT_DISCLAIMER_TEXT }}" + PDF_RECEIPT_BILLING_ADDRESS: "{{ EDXAPP_PDF_RECEIPT_BILLING_ADDRESS }}" + PDF_RECEIPT_TERMS_AND_CONDITIONS: "{{ EDXAPP_PDF_RECEIPT_TERMS_AND_CONDITIONS }}" + PDF_RECEIPT_TAX_ID_LABEL: "{{ EDXAPP_PDF_RECEIPT_TAX_ID_LABEL }}" + PDF_RECEIPT_COBRAND_LOGO_PATH: "{{ EDXAPP_PDF_RECEIPT_COBRAND_LOGO_PATH }}" + PDF_RECEIPT_LOGO_PATH: "{{ EDXAPP_PDF_RECEIPT_LOGO_PATH }}" + PROFILE_IMAGE_BACKEND: "{{ EDXAPP_PROFILE_IMAGE_BACKEND }}" + PROFILE_IMAGE_MIN_BYTES: "{{ EDXAPP_PROFILE_IMAGE_MIN_BYTES }}" + PROFILE_IMAGE_MAX_BYTES: "{{ EDXAPP_PROFILE_IMAGE_MAX_BYTES }}" + PROFILE_IMAGE_SIZES_MAP: "{{ EDXAPP_PROFILE_IMAGE_SIZES_MAP }}" + EDXNOTES_PUBLIC_API: "{{ EDXAPP_EDXNOTES_PUBLIC_API }}" + EDXNOTES_INTERNAL_API: "{{ EDXAPP_EDXNOTES_INTERNAL_API }}" + LTI_USER_EMAIL_DOMAIN: "{{ EDXAPP_LTI_USER_EMAIL_DOMAIN }}" + LTI_AGGREGATE_SCORE_PASSBACK_DELAY: "{{ EDXAPP_LTI_AGGREGATE_SCORE_PASSBACK_DELAY }}" + CREDIT_HELP_LINK_URL: "{{ EDXAPP_CREDIT_HELP_LINK_URL }}" + MAILCHIMP_NEW_USER_LIST_ID: "{{ EDXAPP_MAILCHIMP_NEW_USER_LIST_ID }}" + CONTACT_MAILING_ADDRESS: "{{ EDXAPP_CONTACT_MAILING_ADDRESS }}" + API_ACCESS_MANAGER_EMAIL: "{{ EDXAPP_API_ACCESS_MANAGER_EMAIL }}" + API_ACCESS_FROM_EMAIL: "{{ EDXAPP_API_ACCESS_FROM_EMAIL }}" + API_DOCUMENTATION_URL: "{{ EDXAPP_API_DOCUMENTATION_URL }}" + AUTH_DOCUMENTATION_URL: "{{ EDXAPP_AUTH_DOCUMENTATION_URL }}" + RECALCULATE_GRADES_ROUTING_KEY: "{{ EDXAPP_RECALCULATE_GRADES_ROUTING_KEY }}" + BULK_EMAIL_ROUTING_KEY_SMALL_JOBS: "{{ EDXAPP_BULK_EMAIL_ROUTING_KEY_SMALL_JOBS }}" + CELERY_QUEUES: "{{ EDXAPP_LMS_CELERY_QUEUES }}" + ALTERNATE_WORKER_QUEUES: "cms" + ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES: "{{ EDXAPP_ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES }}" + ENTERPRISE_CUSTOMER_SUCCESS_EMAIL: "{{ EDXAPP_ENTERPRISE_CUSTOMER_SUCCESS_EMAIL }}" + ENTERPRISE_INTEGRATIONS_EMAIL: "{{ EDXAPP_ENTERPRISE_INTEGRATIONS_EMAIL }}" + ENTERPRISE_ENROLLMENT_API_URL: "{{ EDXAPP_ENTERPRISE_ENROLLMENT_API_URL }}" + ENTERPRISE_SUPPORT_URL: "{{ EDXAPP_ENTERPRISE_SUPPORT_URL }}" + PARENTAL_CONSENT_AGE_LIMIT: "{{ EDXAPP_PARENTAL_CONSENT_AGE_LIMIT }}" + ACE_ENABLED_CHANNELS: "{{ EDXAPP_ACE_ENABLED_CHANNELS }}" + ACE_ENABLED_POLICIES: "{{ EDXAPP_ACE_ENABLED_POLICIES }}" + ACE_CHANNEL_SAILTHRU_DEBUG: "{{ EDXAPP_ACE_CHANNEL_SAILTHRU_DEBUG }}" + ACE_CHANNEL_SAILTHRU_TEMPLATE_NAME: "{{ EDXAPP_ACE_CHANNEL_SAILTHRU_TEMPLATE_NAME }}" + ACE_ROUTING_KEY: "{{ EDXAPP_ACE_ROUTING_KEY }}" + ACE_CHANNEL_DEFAULT_EMAIL: "{{ EDXAPP_ACE_CHANNEL_DEFAULT_EMAIL }}" + ACE_CHANNEL_TRANSACTIONAL_EMAIL: "{{ EDXAPP_ACE_CHANNEL_TRANSACTIONAL_EMAIL }}" + ORGANIZATIONS_AUTOCREATE: "{{ EDXAPP_ORGANIZATIONS_AUTOCREATE }}" + ENTERPRISE_TAGLINE: "{{ EDXAPP_ENTERPRISE_TAGLINE }}" + 
ANALYTICS_API_URL: "{{ EDXAPP_LMS_ANALYTICS_API_URL }}" + GOOGLE_SITE_VERIFICATION_ID: "{{ EDXAPP_GOOGLE_SITE_VERIFICATION_ID }}" + STATIC_URL_BASE: "{{ EDXAPP_LMS_STATIC_URL_BASE }}" + X_FRAME_OPTIONS: "{{ EDXAPP_X_FRAME_OPTIONS }}" + WRITABLE_GRADEBOOK_URL: "{{ EDXAPP_LMS_WRITABLE_GRADEBOOK_URL }}" + PROFILE_MICROFRONTEND_URL: "{{ EDXAPP_PROFILE_MICROFRONTEND_URL }}" + ORDER_HISTORY_MICROFRONTEND_URL: "{{ EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL }}" + PROGRAM_CERTIFICATES_ROUTING_KEY: "{{ EDXAPP_PROGRAM_CERTIFICATES_ROUTING_KEY }}" + ACCOUNT_MICROFRONTEND_URL: "{{ EDXAPP_ACCOUNT_MICROFRONTEND_URL }}" + PROGRAM_CONSOLE_MICROFRONTEND_URL: "{{ EDXAPP_PROGRAM_CONSOLE_MICROFRONTEND_URL }}" + LEARNING_MICROFRONTEND_URL: "{{ EDXAPP_LEARNING_MICROFRONTEND_URL }}" + DCS_SESSION_COOKIE_SAMESITE: "{{ EDXAPP_DCS_SESSION_COOKIE_SAMESITE }}" + DCS_SESSION_COOKIE_SAMESITE_FORCE_ALL: "{{ EDXAPP_DCS_SESSION_COOKIE_SAMESITE_FORCE_ALL }}" cms_auth_config: <<: *edxapp_generic_auth -cms_env_config: - <<: *edxapp_generic_env -lms_preview_auth_config: - <<: *edxapp_generic_auth + CONTENTSTORE: + <<: *edxapp_generic_default_contentstore + ADDITIONAL_OPTIONS: "{{ EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS }}" + DOC_STORE_CONFIG: "{{ EDXAPP_CMS_DOC_STORE_CONFIG }}" + DOC_STORE_CONFIG: "{{ EDXAPP_CMS_DOC_STORE_CONFIG }}" MODULESTORE: - default: *edxapp_generic_default_modulestore -lms_preview_env_config: - <<: *edxapp_generic_env - + default: + ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore' + OPTIONS: + mappings: {} + stores: + - NAME: 'split' + ENGINE: 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore' + DOC_STORE_CONFIG: "{{ EDXAPP_CMS_DOC_STORE_CONFIG }}" + OPTIONS: + default_class: 'xmodule.hidden_block.HiddenBlock' + fs_root: "{{ edxapp_course_data_dir }}" + render_template: 'common.djangoapps.edxmako.shortcuts.render_to_string' + - NAME: 'draft' + ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore' + DOC_STORE_CONFIG: "{{ EDXAPP_CMS_DOC_STORE_CONFIG }}" + OPTIONS: + default_class: 'xmodule.hidden_block.HiddenBlock' + fs_root: "{{ edxapp_course_data_dir }}" + render_template: 'common.djangoapps.edxmako.shortcuts.render_to_string' + SEGMENT_KEY: "{{ EDXAPP_CMS_SEGMENT_KEY }}" + PARSE_KEYS: "{{ EDXAPP_PARSE_KEYS }}" + FERNET_KEYS: "{{ EDXAPP_FERNET_KEYS }}" +cms_env_config: + <<: *edxapp_generic_env + SITE_NAME: "{{ EDXAPP_CMS_SITE_NAME }}" + GIT_REPO_EXPORT_DIR: "{{ EDXAPP_GIT_REPO_EXPORT_DIR }}" + CELERY_QUEUES: "{{ EDXAPP_CMS_CELERY_QUEUES }}" + ALTERNATE_WORKER_QUEUES: "lms" + COURSE_IMPORT_EXPORT_BUCKET: "{{ EDXAPP_IMPORT_EXPORT_BUCKET }}" + STATIC_URL_BASE: "{{ EDXAPP_CMS_STATIC_URL_BASE }}" + X_FRAME_OPTIONS: "{{ EDXAPP_X_FRAME_OPTIONS }}" + COURSE_AUTHORING_MICROFRONTEND_URL: "{{ EDXAPP_COURSE_AUTHORING_MICROFRONTEND_URL }}" # install dir for the edx-platform repo edxapp_code_dir: "{{ edxapp_app_dir }}/edx-platform" @@ -422,9 +1728,6 @@ edxapp_cms_gunicorn_port: 8010 edxapp_cms_gunicorn_host: 127.0.0.1 edxapp_lms_gunicorn_port: 8000 edxapp_lms_gunicorn_host: 127.0.0.1 -edxapp_lms_preview_gunicorn_port: 8020 -edxapp_lms_preview_gunicorn_host: 127.0.0.1 - # These vars are for creating the application json config # files. 
There are two for each service that uses the @@ -436,90 +1739,175 @@ service_variants_enabled: - lms - cms -edxapp_lms_env: 'lms.envs.aws' -edxapp_cms_env: 'cms.envs.aws' - #Number of gunicorn worker processes to spawn, as a multiplier to number of virtual cores worker_core_mult: lms: 4 - lms_preview: 2 cms: 2 -# Theming -# Turn theming on and off with edxapp_use_custom_theme +# Stanford-style Theming # Set theme name with edxapp_theme_name # Stanford, for example, uses edxapp_theme_name: 'stanford' # # TODO: change variables to ALL-CAPS, since they are meant to be externally overridden -edxapp_use_custom_theme: false edxapp_theme_name: "" edxapp_theme_source_repo: 'https://{{ COMMON_GIT_MIRROR }}/Stanford-Online/edx-theme.git' -edxapp_theme_version: 'HEAD' +EDXAPP_THEME_VERSION: 'master' # make this the public URL instead of writable -edx_platform_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-platform.git" -# `edx_platform_version` can be anything that git recognizes as a commit +edx_platform_repo: "https://{{ COMMON_GIT_MIRROR }}/openedx/edx-platform.git" +# `EDX_PLATFORM_VERSION` can be anything that git recognizes as a commit # reference, including a tag, a branch name, or a commit hash -edx_platform_version: 'release' -local_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/local.txt" -pre_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/pre.txt" -post_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/post.txt" +EDX_PLATFORM_VERSION: 'release' +custom_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/custom.txt" base_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/base.txt" -github_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/github.txt" -repo_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/repo.txt" +django_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/django.txt" +openstack_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/openstack.txt" -sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base.txt" -sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt" -sandbox_post_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/post.txt" +sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/py38.txt" + +# The Python requirements files in the order they should be installed. This order should +# match the order of PYTHON_REQ_FILES in edx-platform/pavelib/prereqs.py. +edxapp_requirements_files: + - "{{ custom_requirements_file }}" + - "{{ base_requirements_file }}" + - "{{ django_requirements_file }}" + +# All edxapp requirements files potentially containing Github URLs. When using a custom +# Github mirror, occurrences of "github.com" are replaced by the custom mirror in these +# files. +edxapp_requirements_with_github_urls: + - "{{ custom_requirements_file }}" + - "{{ base_requirements_file }}" + - "{{ sandbox_base_requirements }}" edxapp_debian_pkgs: - - npm # for compiling the virtualenv # (only needed if wheel files aren't available) - - build-essential - s3cmd - pkg-config - - graphviz-dev - - graphviz - - libmysqlclient-dev # for scipy, do not install # libopenblas-base, it will cause # problems for numpy - - gfortran - - libatlas3gf-base - - liblapack-dev - g++ - - libxml2-dev - - libxslt1-dev # apparmor - apparmor-utils # misc - curl - - ipython - - npm - ntp - # for shapely - - libgeos-dev - # i18n - - gettext - # Pillow (PIL Fork) Dependencies - # Needed by the CMS to manipulate images. 
- - libjpeg8-dev - - libpng12-dev - -# Ruby Specific Vars -edxapp_ruby_version: "1.9.3-p374" + # matplotlib needs libfreetype6-dev + - libfreetype6-dev + # cffi needs libffi-dev + - libffi-dev + - python3-dev + - libsqlite3-dev + +edxapp_release_specific_debian_pkgs: + xenial: + - ipython + - python-dev + bionic: + - ipython + - python-dev + focal: + - ipython3 + +edxapp_debian_pkgs_default: "{{ edxapp_debian_pkgs + edxapp_release_specific_debian_pkgs[ansible_distribution_release] }}" + +edxapp_debian_pkgs_extra: [] # Deploy Specific Vars edxapp_lms_variant: lms edxapp_cms_variant: cms # Worker Settings -worker_django_settings_module: 'aws' - -# This array is used by the automator role to provide -# access to a limited set of commands via rbash. The -# commands listed here will be symlinked to ~/bin/ for -# the automator user. -edxapp_automated_rbash_links: - - /usr/bin/sudo +worker_django_settings_module: '{{ EDXAPP_SETTINGS }}' +EDXAPP_CELERY_HEARTBEAT_ENABLED: true + +# Add default service worker users +SERVICE_WORKER_USERS: + - email: "{{ EDXAPP_ENTERPRISE_SERVICE_WORKER_EMAIL }}" + username: "{{ EDXAPP_ENTERPRISE_SERVICE_WORKER_USERNAME }}" + is_staff: true + is_superuser: false + - email: "{{ EDXAPP_VEDA_SERVICE_USER_EMAIL }}" + username: "{{ EDXAPP_VEDA_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ DISCOVERY_SERVICE_USER_EMAIL }}" + username: "{{ DISCOVERY_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ INSIGHTS_SERVICE_USER_EMAIL }}" + username: "{{ INSIGHTS_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ CREDENTIALS_SERVICE_USER_EMAIL }}" + username: "{{ CREDENTIALS_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ DESIGNER_SERVICE_USER_EMAIL }}" + username: "{{ DESIGNER_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ REGISTRAR_SERVICE_USER_EMAIL }}" + username: "{{ REGISTRAR_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ EDXAPP_CMS_SERVICE_USER_EMAIL }}" + username: "{{ EDXAPP_CMS_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ LICENSE_MANAGER_SERVICE_USER_EMAIL }}" + username: "{{ LICENSE_MANAGER_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ COMMERCE_COORDINATOR_SERVICE_USER_EMAIL }}" + username: "{{ COMMERCE_COORDINATOR_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ ENTERPRISE_CATALOG_SERVICE_USER_EMAIL }}" + username: "{{ ENTERPRISE_CATALOG_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ EDX_EXAMS_SERVICE_USER_EMAIL }}" + username: "{{ EDX_EXAMS_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ ECOMMERCE_SERVICE_USER_EMAIL }}" + username: "{{ ECOMMERCE_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ EDXAPP_RETIREMENT_SERVICE_USER_EMAIL }}" + username: "{{ EDXAPP_RETIREMENT_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + enabled: "{{ COMMON_RETIREMENT_SERVICE_SETUP | default(false) }}" + +EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION: false + +EDXAPP_LMS_LOCAL_CONFIG_FILE: "{{ UNENCRYPTED_CFG_DIR }}/lms.yml" +EDXAPP_CMS_LOCAL_CONFIG_FILE: "{{ UNENCRYPTED_CFG_DIR }}/studio.yml" + +edxapp_staticfiles_storage_overrides: !!null + +# Accepts a list of dictionaries of the following form. 
+# EDXAPP_SITE_CONFIGURATION: +# - site_id: 1 +# values: +# foo: true +# bar: false +# - domain: example.com +# values: +# abc: true +# - values: +# xyz: true +# +# In each dictionary, the 'site_id' and the 'domain' keys are optional and the 'values' key +# is required. However, only one of 'site_id', 'domain' can be specified due to the behaviour +# of the 'create_or_update_site_configuration' management command. The 'values' key accepts a +# dictionary of keys and values corresponding to the SiteConfiguration parameters to be added to the +# SiteConfiguration instance. + +EDXAPP_SITE_CONFIGURATION: {} + +EDXAPP_ENABLE_MONGODB_INDEXES: false diff --git a/playbooks/roles/edxapp/handlers/main.yml b/playbooks/roles/edxapp/handlers/main.yml deleted file mode 100644 index ae6e10524f7..00000000000 --- a/playbooks/roles/edxapp/handlers/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: restart edxapp - supervisorctl_local: > - state=restarted - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - name="edxapp:{{ item }}" - when: edxapp_installed is defined and celery_worker is not defined and not devstack - sudo_user: "{{ supervisor_service_user }}" - with_items: service_variants_enabled - -- name: restart edxapp_workers - supervisorctl_local: > - name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}" - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - when: edxapp_installed is defined and celery_worker is defined and not devstack - with_items: edxapp_workers - sudo_user: "{{ common_web_user }}" diff --git a/playbooks/roles/edxapp/meta/main.yml b/playbooks/roles/edxapp/meta/main.yml index 893810f9d89..4d9502c414c 100644 --- a/playbooks/roles/edxapp/meta/main.yml +++ b/playbooks/roles/edxapp/meta/main.yml @@ -1,14 +1,16 @@ --- dependencies: - - supervisor - - role: rbenv - rbenv_user: "{{ edxapp_user }}" - rbenv_dir: "{{ edxapp_app_dir }}" - rbenv_ruby_version: "{{ edxapp_ruby_version }}" - - devpi - - role: automated - automated_rbash_links: "{{ edxapp_automated_rbash_links }}" - automated_sudoers_dest: '99-automator-edxapp-server' - automated_sudoers_template: 'roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2' - automated_authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}" - when: EDXAPP_INCLUDE_AUTOMATOR_ROLE + - common + - role: supervisor + supervisor_spec: + - service: edxapp + migration_check_services: "lms,cms,workers" + python: "{{ edxapp_venv_bin }}/python" + code: "{{ edxapp_code_dir | default(None) }}" + env: "{{ edxapp_app_dir | default(None) }}/edxapp_env" + - edxapp_common + - nltk + - role: edx_themes + theme_users: + - "{{ edxapp_user }}" + when: EDXAPP_ENABLE_COMPREHENSIVE_THEMING diff --git a/playbooks/roles/edxapp/tasks/deploy.yml b/playbooks/roles/edxapp/tasks/deploy.yml index 716ce3975f0..d27bdab0782 100644 --- a/playbooks/roles/edxapp/tasks/deploy.yml +++ b/playbooks/roles/edxapp/tasks/deploy.yml @@ -1,209 +1,322 @@ -- name: setup the edxapp env - notify: - - "restart edxapp" - - "restart edxapp_workers" - template: > - src=edxapp_env.j2 dest={{ edxapp_app_dir }}/edxapp_env - owner={{ edxapp_user }} group={{ common_web_user }} - mode=0644 - -# Optional auth for git -- name: create ssh script for git (not authenticated) - template: > - src=git_ssh_noauth.sh.j2 dest={{ edxapp_git_ssh }} - owner={{ edxapp_user }} mode=750 - when: not EDXAPP_USE_GIT_IDENTITY - -- name: create ssh script for git (authenticated) - template: > - src=git_ssh_auth.sh.j2 dest={{ 
edxapp_git_ssh }} - owner={{ edxapp_user }} mode=750 - when: EDXAPP_USE_GIT_IDENTITY +--- +- name: copy the template to the desired location + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode }}" + with_items: + - { src: 'edxapp_env.j2', dest: '{{ edxapp_app_dir }}/edxapp_env', owner: '{{ edxapp_user }}', group: '{{ common_web_user }}', mode: '0644' } + - { src: 'newrelic.ini.j2', dest: '{{ edxapp_app_dir }}/newrelic.ini', owner: '{{ edxapp_user }}', group: '{{ common_web_user }}', mode: '0644' } + - { src: 'git_ssh.sh.j2', dest: '{{ edxapp_git_ssh }}', owner: '{{ edxapp_user }}', group: '{{ edxapp_user }}', mode: '0750' } + - { src: 'boto.j2', dest: '{{ edxapp_app_dir }}/.boto', owner: '{{ edxapp_user }}', group: '{{ common_web_user }}', mode: '0644' } + tags: + - install + - install:base - name: install read-only ssh key - copy: > - src={{ EDXAPP_LOCAL_GIT_IDENTITY }} dest={{ edxapp_git_identity }} - force=yes owner={{ edxapp_user }} mode=0600 + copy: + content: "{{ EDXAPP_GIT_IDENTITY }}" + dest: "{{ edxapp_git_identity }}" + force: yes + owner: "{{ edxapp_user }}" + mode: "0600" when: EDXAPP_USE_GIT_IDENTITY + tags: + - install + - install:base + +# On devstack, tell Git that repos owned by other users are safe. +# This is necessary in git 2.35.2 and higher. Devstack uses a mix of +# root and edxapp, and git+https pip dependencies end up cloning repos +# into an open-ended set of directories, so our best bet is to just +# say every dir on devstack is safe. +- name: Mark all directories as safe for git on devstack + shell: "git config --global --add safe.directory '*'" + become: true + when: "({{ devstack | default(False) }} or {{ edx_django_service_is_devstack | default(False) }})" + tags: + - devstack + +- name: set git fetch.prune to ignore deleted remote refs + shell: git config --global fetch.prune true + become_user: "{{ edxapp_user }}" + tags: + - install + - install:base # Do A Checkout -- name: checkout edx-platform repo into {{edxapp_code_dir}} - git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}} - register: chkout - sudo_user: "{{ edxapp_user }}" - environment: - GIT_SSH: "{{ edxapp_git_ssh }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" +- name: checkout edx-platform repo into {{ edxapp_code_dir }} + git: + dest: "{{ edxapp_code_dir }}" + repo: "{{ edx_platform_repo }}" + version: "{{ EDX_PLATFORM_VERSION }}" + accept_hostkey: yes + key_file: "{% if EDXAPP_USE_GIT_IDENTITY %}{{ edxapp_git_identity }}{% endif %}" + become_user: "{{ edxapp_user }}" + register: edxapp_platform_checkout + tags: + - install + - install:code - name: git clean after checking out edx-platform - shell: cd {{edxapp_code_dir}} && git clean -xdf - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -- name: checkout theme - git: dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}} + shell: cd {{ edxapp_code_dir }} && git clean -xdf + become_user: "{{ edxapp_user }}" + tags: + - install + - install:code + +# edX supports two theme types, Comprehensive themes and the older +# Stanford-style themes. They are mutually exclusive. +# +# To enable Stanford theming, point edxapp_theme_source_repo +# (yes, lowercase) to a Stanford-style theme and set +# edxapp_theme_name (again, lowercase) to its name. 
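# For example, a deployment enabling a Stanford-style theme could override the
# role defaults like this (repo URL and theme name are hypothetical, shown only
# to illustrate the variables named above):
#
#   edxapp_theme_name: 'my-theme'
#   edxapp_theme_source_repo: 'https://github.com/example/my-edx-theme.git'
#   EDXAPP_THEME_VERSION: 'master'
#
# When edxapp_theme_name keeps its empty default, the checkout task below is
# skipped via its `when` condition.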
+- name: checkout Stanford-style theme + git: + dest: "{{ edxapp_app_dir }}/themes/{{ edxapp_theme_name }}" + repo: "{{ edxapp_theme_source_repo }}" + version: "{{ EDXAPP_THEME_VERSION }}" + accept_hostkey: yes + key_file: "{% if EDXAPP_USE_GIT_IDENTITY %}{{ edxapp_git_identity }}{% endif %}" when: edxapp_theme_name != '' - sudo_user: "{{ edxapp_user }}" - environment: - GIT_SSH: "{{ edxapp_git_ssh }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -- name: remove read-only ssh key - file: path={{ edxapp_git_identity }} state=absent - when: EDXAPP_USE_GIT_IDENTITY - -- name: create checksum for requirements, package.json and Gemfile - shell: > - /usr/bin/md5sum {{ " ".join(edxapp_chksum_req_files) }} 2>/dev/null > /var/tmp/edxapp.req.new - sudo_user: "{{ edxapp_user }}" - ignore_errors: true - -- stat: path=/var/tmp/edxapp.req.new - register: new - sudo_user: "{{ edxapp_user }}" - -- stat: path=/var/tmp/edxapp.req.installed - register: inst - sudo_user: "{{ edxapp_user }}" + become_user: "{{ edxapp_user }}" + register: edxapp_theme_checkout + tags: + - install + - install:code + +# Download a theme and apply small modifications like SASS changes +# To enable/disable this, set SIMPLETHEME_ENABLE_DEPLOY +# https://github.com/ansible/ansible/issues/19472 prevents including the +# role conditionally +- name: Install a theme through simpletheme + include_role: + name: "simple_theme" + +- name: Stat each requirements file with Github URLs to ensure it exists + stat: + path: "{{ item }}" + with_items: "{{ edxapp_requirements_with_github_urls }}" + register: requirement_file_stats + tags: + - install + - install:code + - install:app-requirements # Substitute github mirror in all requirements files # This is run on every single deploy - name: Updating requirement files for git mirror command: | - /bin/sed -i -e 's/github\.com/{{ COMMON_GIT_MIRROR }}/g' {{ " ".join(edxapp_all_req_files) }} - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -# Ruby plays that need to be run after platform updates. -- name: gem install bundler - shell: > - gem install bundle - chdir={{ edxapp_code_dir }} - executable=/bin/bash - environment: "{{ edxapp_environment }}" - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -- name: bundle install - shell: > - bundle install --binstubs - chdir={{ edxapp_code_dir }} - executable=/bin/bash - sudo_user: "{{ edxapp_user }}" - environment: "{{ edxapp_environment }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -# Set the npm registry -- name: Set the npm registry - shell: - npm config set registry 'http://registry.npmjs.org/' - creates="{{ edxapp_app_dir }}/.npmrc" - sudo_user: "{{ edxapp_user }}" - environment: "{{ edxapp_environment }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -# Node play that need to be run after platform updates. 
-- name: Install edx-platform npm dependencies - shell: npm install chdir={{ edxapp_code_dir }} - sudo_user: "{{ edxapp_user }}" + /bin/sed -i -e 's/github\.com/{{ COMMON_GIT_MIRROR }}/g' {{ item.item }} + become_user: "{{ edxapp_user }}" + when: item.stat.exists + with_items: "{{ requirement_file_stats.results }}" + tags: + - install + - install:code + - install:app-requirements + +- name: Create the virtualenv to install the Python requirements + command: "virtualenv {{ edxapp_venv_dir }} -p {{ EDXAPP_PYTHON_VERSION }} --always-copy" + args: + chdir: "{{ edxapp_code_dir }}" + creates: "{{ edxapp_venv_dir }}/bin/pip" + become_user: "{{ edxapp_user }}" environment: "{{ edxapp_environment }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - - -# Install the python pre requirements into {{ edxapp_venv_dir }} -- name : install python pre-requirements - pip: > - requirements="{{pre_requirements_file}}" - virtualenv="{{edxapp_venv_dir}}" - state=present - extra_args="-i {{ edxapp_pypi_local_mirror }}" - sudo_user: "{{ edxapp_user }}" + tags: + - install + - install:app-requirements + +- name: Pin pip to a specific version. + command: "{{ edxapp_venv_dir }}/bin/pip install pip=={{ COMMON_PIP_VERSION }}" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ edxapp_user }}" environment: "{{ edxapp_environment }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - when: not inst.stat.exists or new.stat.md5 != inst.stat.md5 + tags: + - install + - install:app-requirements + +- name: Stat each Python requirements file to ensure it exists + stat: + path: "{{ item }}" + with_items: "{{ edxapp_requirements_files }} + {{ [openstack_requirements_file] if EDXAPP_SETTINGS == 'openstack' else [] }}" + register: python_requirement_files + tags: + - install + - install:app-requirements -# Install the python modules into {{ edxapp_venv_dir }} -- name : install python base-requirements - # Need to use shell rather than pip so that we can maintain the context of our current working directory; some +# Install the python requirements into {{ edxapp_venv_dir }} +- name: install python requirements + # Need to use command rather than pip so that we can maintain the context of our current working directory; some # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # installs everything into that virtual environment. 
- shell: > - {{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ base_requirements_file }} - chdir={{ edxapp_code_dir }} + command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ edxapp_user }}" environment: "{{ edxapp_environment }}" - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - when: not inst.stat.exists or new.stat.md5 != inst.stat.md5 - -# Install the python post requirements into {{ edxapp_venv_dir }} -- name : install python post-requirements - pip: > - requirements="{{post_requirements_file}}" - virtualenv="{{edxapp_venv_dir}}" - state=present - extra_args="-i {{ edxapp_pypi_local_mirror }}" - sudo_user: "{{ edxapp_user }}" - environment: "{{ edxapp_environment }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - when: not inst.stat.exists or new.stat.md5 != inst.stat.md5 + when: item.stat.exists + with_items: "{{ python_requirement_files.results }}" + register: edxapp_install_python_reqs + until: edxapp_install_python_reqs is succeeded + retries: 5 + delay: 15 + tags: + - install + - install:app-requirements + +- name: Install local edxapp requirements + command: "{{ edxapp_venv_dir }}/bin/pip install -e {{ edxapp_code_dir }}/." + become_user: "{{ edxapp_user }}" + tags: + - install + - install:app-requirements -# Install the final python modules into {{ edxapp_venv_dir }} -- name : install python post-post requirements +# Private requirements require a ssh key to install, use the same key as the private key for edx-platform +# If EDXAPP_INSTALL_PRIVATE_REQUIREMENTS is set to true EDXAPP_USE_GIT_IDENTITY must also be true +- name: install python private requirements # Need to use shell rather than pip so that we can maintain the context of our current working directory; some # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # installs everything into that virtual environment. - shell: > - {{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} - chdir={{ edxapp_code_dir }} + shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w {{ item.extra_args|default('') }} {{ item.name|quote }}" + args: + chdir: "{{ edxapp_code_dir }}" with_items: - - "{{ repo_requirements_file }}" - - "{{ github_requirements_file }}" - - "{{ local_requirements_file }}" - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - + - "{{ EDXAPP_PRIVATE_REQUIREMENTS }}" + become_user: "{{ edxapp_user }}" + environment: + GIT_SSH: "{{ edxapp_git_ssh }}" + when: EDXAPP_INSTALL_PRIVATE_REQUIREMENTS + register: edxapp_install_private_python_reqs + until: edxapp_install_private_python_reqs is succeeded + retries: 5 + delay: 15 + tags: + - install + - install:app-requirements + +# Install any custom extra requirements if defined in EDXAPP_EXTRA_REQUIREMENTS. 
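# The task below reads `name`, an optional `version`, and optional `extra_args`
# from each entry, so a hypothetical override could look like:
#
#   EDXAPP_EXTRA_REQUIREMENTS:
#     - name: 'my-custom-xblock'
#       version: '1.2.3'
#     - name: 'some-internal-package'
#       extra_args: '--no-deps'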
+- name: install python extra requirements + pip: + name: "{{ item.name }}" + version: "{{ item.version|default(omit) }}" + extra_args: "--exists-action w {{ item.extra_args|default('') }}" + virtualenv: "{{ edxapp_venv_dir }}" + state: present + with_items: "{{ EDXAPP_EXTRA_REQUIREMENTS }}" + become_user: "{{ edxapp_user }}" + register: edxapp_install_extra_python_reqs + until: edxapp_install_extra_python_reqs is succeeded + retries: 5 + delay: 15 + tags: + - install + - install:app-requirements + +- name: "Install Datadog APM requirements" + when: COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP + pip: + name: + - ddtrace + extra_args: "--exists-action w {{ item.extra_args|default('') }}" + virtualenv: "{{ edxapp_venv_dir }}" + state: present + become_user: "{{ edxapp_user }}" + register: edxapp_install_datadog_reqs + until: edxapp_install_datadog_reqs is succeeded + retries: 5 + delay: 15 + tags: + - install + - install:app-requirements + +# Pulling Atlas translations into the repo needs to happen after +# Python dependencies have been installed. Note: This task leaves the +# git working directory in a "dirty" state. +- name: "Pull translations using Atlas" + shell: | + source "{{ edxapp_venv_dir }}/bin/activate" + make pull_translations + args: + executable: /usr/bin/bash + chdir: "{{ edxapp_code_dir }}" + environment: + # Use production Django settings because otherwise debug_toolbar will be + # referenced and cause an error (we don't have developer Python deps installed.) + EDX_PLATFORM_SETTINGS: production + # Use minimal configs because the real configs aren't installed until + # later in the playbook. + LMS_CFG: lms/envs/minimal.yml + STUDIO_CFG: lms/envs/minimal.yml + OPENEDX_ATLAS_PULL: true + become_user: "{{ edxapp_user }}" + tags: + - install + +# If using CAS and you have a function for mapping attributes, install +# the module here. The next few tasks set up the python code sandbox +- name: install CAS attribute module + pip: + name: "{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}" + virtualenv: "{{ edxapp_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + become_user: "{{ edxapp_user }}" + when: EDXAPP_CAS_ATTRIBUTE_PACKAGE|length > 0 + tags: + - install + - install:app-requirements # Install the sandbox python modules into {{ edxapp_venv_dir }} -- name : install sandbox requirements into regular venv +- name: install sandbox requirements into regular venv # Need to use shell rather than pip so that we can maintain the context of our current working directory; some # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # installs everything into that virtual environment. 
- shell: > - {{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} - chdir={{ edxapp_code_dir }} + shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}" + args: + chdir: "{{ edxapp_code_dir }}" with_items: - "{{ sandbox_base_requirements }}" - - "{{ sandbox_local_requirements }}" - - "{{ sandbox_post_requirements }}" - sudo_user: "{{ edxapp_user }}" - when: "not EDXAPP_PYTHON_SANDBOX and (not inst.stat.exists or new.stat.md5 != inst.stat.md5)" - notify: - - "restart edxapp" - - "restart edxapp_workers" + become_user: "{{ edxapp_user }}" + when: not EDXAPP_PYTHON_SANDBOX + tags: + - install + - install:app-requirements + +# Set the npm registry +# This needs to be done as root since npm is weird about +# chown - https://github.com/npm/npm/issues/3565 +- name: Set the npm registry + shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'" + args: + creates: "{{ edxapp_app_dir }}/.npmrc" + environment: "{{ edxapp_environment }}" + tags: + - install + - install:app-requirements + +# Set the npm registry permissions +- name: Set the npm registry permissions + file: + path: "{{ edxapp_app_dir }}/.npmrc" + owner: "{{ edxapp_user }}" + group: "{{ edxapp_user }}" + tags: + - install + - install:app-requirements + +- name: install node dependencies + shell: "easy_install --version && npm clean-install" + args: + chdir: "{{ edxapp_code_dir }}" + environment: "{{ edxapp_environment | combine(git_ssh_environment_mixin) }}" + become_user: "{{ edxapp_user }}" + tags: + - install + - install:app-requirements # The next few tasks set up the python code sandbox @@ -213,82 +326,139 @@ command: /usr/sbin/aa-complain /etc/apparmor.d/code.sandbox when: EDXAPP_PYTHON_SANDBOX tags: - - edxapp-sandbox - -- name: code sandbox | Install base sandbox requirements and create sandbox virtualenv - pip: > - requirements="{{sandbox_base_requirements}}" - virtualenv="{{edxapp_sandbox_venv_dir}}" - state=present - extra_args="-i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors" - sudo_user: "{{ edxapp_sandbox_user }}" + - edxapp-sandbox + - install + - install:app-requirements + +- name: Create the virtualenv to install the Python sandbox requirements + command: "virtualenv {{ edxapp_sandbox_venv_dir }} -p {{ edxapp_sandbox_python_version }} --always-copy" + args: + chdir: "{{ edxapp_code_dir }}" + creates: "{{ edxapp_sandbox_venv_dir }}/bin/pip" + become_user: "{{ edxapp_sandbox_user }}" + environment: "{{ edxapp_environment }}" + when: EDXAPP_PYTHON_SANDBOX + tags: + - edxapp-sandbox + - install + - install:app-requirements + +- name: Pin pip to a specific version. 
+ # Not pinning to the same version as everything else because sandboxes run a separate Python version + command: "{{ edxapp_sandbox_venv_dir }}/bin/pip install pip==21.2.1" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ edxapp_sandbox_user }}" + environment: "{{ edxapp_environment }}" when: EDXAPP_PYTHON_SANDBOX - notify: - - "restart edxapp" - - "restart edxapp_workers" tags: - - edxapp-sandbox + - edxapp-sandbox + - install + - install:app-requirements -- name: code sandbox | Install sandbox requirements into sandbox venv - shell: > - {{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} - chdir={{ edxapp_code_dir }} - with_items: - - "{{ sandbox_local_requirements }}" - - "{{ sandbox_post_requirements }}" - sudo_user: "{{ edxapp_sandbox_user }}" +- name: code sandbox | Install base sandbox requirements and create sandbox virtualenv + pip: + chdir: "{{ edxapp_code_dir }}" + requirements: "{{ sandbox_base_requirements }}" + virtualenv: "{{ edxapp_sandbox_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + virtualenv_python: "{{ edxapp_sandbox_python_version }}" + become_user: "{{ edxapp_sandbox_user }}" when: EDXAPP_PYTHON_SANDBOX - register: sandbox_install_output - changed_when: "'installed' in sandbox_install_output" - notify: - - "restart edxapp" - - "restart edxapp_workers" tags: - - edxapp-sandbox + - edxapp-sandbox + - install + - install:app-requirements - name: code sandbox | put code sandbox into aa-enforce or aa-complain mode, depending on EDXAPP_SANDBOX_ENFORCE command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox when: EDXAPP_PYTHON_SANDBOX tags: - - edxapp-sandbox + - edxapp-sandbox + - install + - install:app-requirements + +- name: code sandbox | test enforcement 1 + command: "{{ edxapp_sandbox_venv_dir }}/bin/python -c \"import os; os.listdir('/')\"" + register: sandbox_test1 + failed_when: "'PermissionError:' not in sandbox_test1.stderr" + when: EDXAPP_SANDBOX_ENFORCE + +- name: code sandbox | test enforcement 2 + command: "{{ edxapp_sandbox_venv_dir }}/bin/python -c \"import subprocess; subprocess.check_output('ls', shell=True)\"" + register: sandbox_test2 + failed_when: "'PermissionError:' not in sandbox_test2.stderr" + when: EDXAPP_SANDBOX_ENFORCE + +- name: code sandbox | test enforcement 3 + command: "{{ edxapp_sandbox_venv_dir }}/bin/python -c \"print(7*11*13*17)\"" + register: sandbox_test3 + failed_when: "'17017' not in sandbox_test3.stdout" + when: EDXAPP_SANDBOX_ENFORCE - name: compiling all py files in the edx-platform repo - shell: "{{ edxapp_venv_bin }}/python -m compileall {{ edxapp_code_dir }}" - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" + shell: "{{ edxapp_venv_bin }}/python -m compileall -q -x '.git/.*|node_modules/.*' {{ edxapp_code_dir }}" + become_user: "{{ edxapp_user }}" + tags: + - install + - install:code # alternative would be to give {{ common_web_user }} read access # to the virtualenv but that permission change will require # root access. 
- name: give other read permissions to the virtualenv - command: chmod -R o+r "{{ edxapp_venv_dir }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -- name: create checksum for installed requirements - shell: cp /var/tmp/edxapp.req.new /var/tmp/edxapp.req.installed - sudo_user: "{{ edxapp_user }}" - notify: "restart edxapp" - - -# https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726 -# This is necessary for when syncdb is run and the django_openid_auth module is installed, -# not sure if this fix will ever get merged -- name: openid workaround - shell: sed -i -e 's/claimed_id = models.TextField(max_length=2047, unique=True/claimed_id = models.TextField(max_length=2047/' {{ edxapp_venv_dir }}/lib/python2.7/site-packages/django_openid_auth/models.py - when: openid_workaround is defined - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" + file: + path: "{{ edxapp_venv_dir }}" + state: directory + mode: "o+r" + recurse: yes + tags: + - install + - install:code + +- name: "create service wrapper scripts - {{item}}" + template: + dest: "{{ edxapp_app_dir }}/{{item}}" + src: "edx/app/edxapp/{{item}}.j2" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: "og+rx" + with_items: + - "lms.sh" + - "cms.sh" + - "beat_scheduler.sh" + - "worker.sh" + - "reload_lms_config.sh" + - "reload_cms_config.sh" + tags: + - install + - install:configuration + +- name: install single-beat to run only one celerybeat scheduler + pip: + name: "git+{{ EDXAPP_SINGLE_BEAT_REPO }}@{{ EDXAPP_SINGLE_BEAT_VERSION|default('master') }}#egg=single-beat" + virtualenv: "{{ edxapp_venv_dir }}" + state: present + become_user: "{{ edxapp_user }}" + when: EDXAPP_ENABLE_CELERY_BEAT + tags: + - install + - install:app-requirements + +- name: import custom tinymce plugins + include_role: + name: "tinymce_plugins" + when: + - celery_worker is not defined # creates the supervisor jobs for the # service variants configured, runs # gather_assets and db migrations - include: service_variant_config.yml + tags: + - service_variant_config + - deploy # call supervisorctl update. 
this reloads # the supervisorctl config and restarts @@ -298,38 +468,144 @@ - name: update supervisor configuration shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" register: supervisor_update - sudo_user: "{{ supervisor_service_user }}" - changed_when: supervisor_update.stdout != "" - when: not devstack + become_user: "{{ supervisor_service_user }}" + changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" + when: not disable_edx_services + tags: + - manage - name: ensure edxapp has started - supervisorctl_local: > - state=started - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - name="edxapp:{{ item }}" - sudo_user: "{{ supervisor_service_user }}" - when: celery_worker is not defined and not devstack - with_items: service_variants_enabled + supervisorctl: + name: "{{ item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + become_user: "{{ supervisor_service_user }}" + when: celery_worker is not defined and not disable_edx_services + with_items: + - 'lms' + - 'cms' + tags: + - manage - name: ensure edxapp_workers has started - supervisorctl_local: > - name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}" - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - when: celery_worker is defined and not devstack - with_items: edxapp_workers - sudo_user: "{{ supervisor_service_user }}" - -- name: create a symlink for venv python - file: > - src="http://github.com/%7B%7B%20edxapp_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.edxapp - state=link + supervisorctl: + name: "edxapp_worker:" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + when: celery_worker is defined and not disable_edx_services + become_user: "{{ supervisor_service_user }}" + tags: + - manage + +- name: create symlinks from the repo dir + file: + src: "{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ (item | basename).split('.', 1) | first }}.edxapp" + state: link with_items: - - python - - pip + - '{{ edxapp_code_dir }}/manage.py' + tags: + - install + - install:configuration +- name: remove read-only ssh key + file: + path: "{{ edxapp_git_identity }}" + state: absent + when: EDXAPP_USE_GIT_IDENTITY + tags: + - install + - install:configuration + - install:code -- set_fact: edxapp_installed=true +- include: tag_ec2.yml tags=deploy + when: COMMON_TAG_EC2_INSTANCE + tags: + - remove + - aws + +- set_fact: + edxapp_installed: true + +- name: restart edxapp + supervisorctl: + name: "{{ item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: edxapp_installed is defined and celery_worker is not defined and not disable_edx_services + become_user: "{{ supervisor_service_user }}" + with_items: + - 'lms' + - 'cms' + tags: + - manage + +- name: restart edxapp_workers + supervisorctl: + name: "edxapp_worker:" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: edxapp_installed is defined and celery_worker is defined and not disable_edx_services + become_user: "{{ common_web_user }}" + tags: + - manage + +- name: create service worker users + shell: ". 
{{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python ./manage.py lms --settings={{ edxapp_settings }} --service-variant lms manage_user {{ item.username}} {{ item.email }} --unusable-password {% if item.is_staff %} --staff{% endif %}" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + with_items: "{{ SERVICE_WORKER_USERS }}" + when: CREATE_SERVICE_WORKER_USERS and item.enabled|default(true) + tags: + - manage + - manage:db + +- name: reindex all courses + shell: ". {{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python ./manage.py cms reindex_course --setup --settings={{ edxapp_settings }}" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + when: EDXAPP_REINDEX_ALL_COURSES + tags: + - install + - install:base + +- name: install cron job to run clearsessions + cron: + name: "clear expired Django sessions" + user: "{{ edxapp_user }}" + job: ". {{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py lms clearsessions --settings={{ edxapp_settings }} >/dev/null 2>&1" + hour: "{{ EDXAPP_CLEARSESSIONS_CRON_HOURS }}" + minute: "{{ EDXAPP_CLEARSESSIONS_CRON_MINUTES }}" + day: "*" + when: EDXAPP_CLEARSESSIONS_CRON_ENABLED + +- name: install additional cron jobs + cron: "{{ item }}" + with_items: "{{ EDXAPP_ADDITIONAL_CRON_JOBS }}" + +- name: populate retirement states + shell: ". {{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python ./manage.py lms --settings={{ edxapp_settings }} populate_retirement_states" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + when: COMMON_RETIREMENT_SERVICE_SETUP | default(false) + tags: + - manage + - manage:db + +- name: ensure indexes on contentstore and modulestore dbs + shell: ". 
{{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python ./manage.py lms --settings={{ edxapp_settings }} ensure_indexes" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + when: EDXAPP_ENABLE_MONGODB_INDEXES + run_once: yes + tags: + - manage + - manage:db diff --git a/playbooks/roles/edxapp/tasks/main.yml b/playbooks/roles/edxapp/tasks/main.yml index 54959ea2bc7..219a0288a89 100644 --- a/playbooks/roles/edxapp/tasks/main.yml +++ b/playbooks/roles/edxapp/tasks/main.yml @@ -3,65 +3,222 @@ # - common/tasks/main.yml --- - name: create application user - user: > - name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}" - createhome=no shell=/bin/false - notify: - - "restart edxapp" - - "restart edxapp_workers" + user: + name: "{{ edxapp_user }}" + home: "{{ edxapp_app_dir }}" + createhome: "{{ edxapp_user_createhome }}" + shell: "{{ edxapp_user_shell }}" + tags: + - install + - install:base - name: create edxapp user dirs - file: > - path="{{ item }}" state=directory - owner="{{ edxapp_user }}" group="{{ common_web_group }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" + file: + path: "{{ item.path }}" + state: directory + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: "{{ item.mode | default('0755') }}" with_items: - - "{{ edxapp_app_dir }}" - - "{{ edxapp_data_dir }}" - - "{{ edxapp_venvs_dir }}" - - "{{ edxapp_theme_dir }}" - - "{{ edxapp_staticfile_dir }}" + - { path: "{{ edxapp_app_dir }}" } + # needed for the ansible 1.5 git module + - { path: "{{ edxapp_app_dir }}/.ssh" } + - { path: "{{ edxapp_venvs_dir }}" } + - { path: "{{ edxapp_theme_dir }}" } + - { path: "{{ edxapp_staticfile_dir }}" } + - { path: "{{ edxapp_course_static_dir }}" } + # var should have more permissive permissions than the rest + - { path: "{{ edxapp_data_dir }}", mode: "0775" } + # directory to import the courses from github + - { path: "{{ EDXAPP_GIT_REPO_DIR }}", mode: "0775" } + # directory to export the courses to git + - { path: "{{ EDXAPP_GIT_REPO_EXPORT_DIR }}", mode: "0775" } + tags: + - install + - install:base + +- name: write devstack script + template: + src: "devstack.sh.j2" + dest: "{{ edxapp_app_dir }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: devstack is defined and devstack + tags: + - devstack + - devstack:install + +- name: add paver autocomplete to bashrc + lineinfile: + dest: /root/.bashrc + line: "source {{ edxapp_code_dir }}/scripts/paver_autocomplete.sh" + when: devstack is defined and devstack + tags: + - devstack + - devstack:install - name: create edxapp log dir - file: > - path="{{ edxapp_log_dir }}" state=directory - owner="{{ common_log_user }}" group="{{ common_log_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" + file: + path: "{{ edxapp_log_dir }}" + state: directory + owner: "{{ common_log_user }}" + group: "{{ common_log_user }}" + tags: + - install + - install:base + +- name: Ensure the tracking folder exists + file: + path: "{{ COMMON_LOG_DIR }}/tracking" + state: directory + owner: root + group: root + tags: + - install + - install:base + +- name: Ensure the tracking.log file exists + file: + path: "{{ COMMON_LOG_DIR }}/tracking/tracking.log" + state: touch + owner: syslog + group: adm + mode: "0640" + tags: + - install + - install:base - name: create web-writable edxapp data dirs - file: > - path="{{ item }}" state=directory - owner="{{ common_web_user }}" group="{{ edxapp_user }}" - mode="0775" - notify: - - "restart edxapp" - 
- "restart edxapp_workers" + file: + path: "{{ item }}" + state: directory + owner: "{{ common_web_user }}" + group: "{{ edxapp_user }}" + mode: "0775" with_items: - "{{ edxapp_course_data_dir }}" - "{{ edxapp_upload_dir }}" + - "{{ edxapp_media_dir }}" + tags: + - install + - install:base - name: install system packages on which LMS and CMS rely - apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present - notify: - - "restart edxapp" - - "restart edxapp_workers" + apt: + name: "{{ item }}" + state: present + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + with_flattened: + - "{{ edxapp_debian_pkgs_default }}" + - "{{ edxapp_debian_pkgs_extra }}" + tags: + - install + - install:base + +- name: Install the gpg key for nodejs LTS + apt_key: + url: "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" + state: present + tags: + - install + - install:base + +- name: Install the nodejs LTS repos + apt_repository: + repo: "deb https://deb.nodesource.com/node_{{ EDXAPP_NODE_VERSION }}.x nodistro main" + state: present + update_cache: yes + register: add_repo + until: add_repo is success + retries: 10 + delay: 5 + tags: + - install + - install:base + +- name: Install node + apt: + name: nodejs + state: present + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + tags: + - install + - install:base + +- name: set up edxapp .npmrc + template: + src: .npmrc.j2 + dest: "{{ edxapp_app_dir }}/.npmrc" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0600 + tags: + - install + - install:base + +# This will install npm EDXAPP_NPM_VERSION to edxapp_npm_bin rather than updating the global npm version installed via apt. +# As edxapp_npm_bin is already part of edxapp_environment, the npm command will always pick up the one installed in edxapp_npm_bin. 
+- name: Pin npm to {{ EDXAPP_NPM_VERSION }} + shell: "npm install -g npm@{{ EDXAPP_NPM_VERSION }}" + environment: "{{ edxapp_environment | combine(git_ssh_environment_mixin) }}" + become_user: "{{ edxapp_user }}" + tags: + - install + - install:base + +- name: install python3.8 + apt: + pkg: + - python3.8-dev + - python3.8-distutils + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + tags: + - install + - install:system-requirements - name: create log directories for service variants - notify: - - "restart edxapp" - - "restart edxapp_workers" - file: > - path={{ edxapp_log_dir }}/{{ item }} state=directory - owner={{ common_log_user }} group={{ common_log_user }} - mode=0750 - with_items: service_variants_enabled + file: + path: "{{ edxapp_log_dir }}/{{ item }}" + state: directory + owner: "{{ common_log_user }}" + group: "{{ common_log_user }}" + mode: "0750" + with_items: "{{ service_variants_enabled }}" + tags: + - install + - install:base # Set up the python sandbox execution environment - include: python_sandbox_env.yml when: EDXAPP_PYTHON_SANDBOX + tags: + - deploy + +- include: deploy.yml + tags: + - deploy -- include: deploy.yml tags=deploy +- name: Include JWT signature setting in the app config file + include_role: + name: jwt_signature + when: CONFIGURE_JWTS and celery_worker is not defined + vars: + app_name: 'lms' + app_config_file: "{{ COMMON_CFG_DIR }}/lms.yml" + app_config_owner: "{{ edxapp_user }}" + app_config_group: "{{ common_web_group }}" + app_config_mode: 0640 + CAN_GENERATE_NEW_JWT_SIGNATURE: True diff --git a/playbooks/roles/edxapp/tasks/python_sandbox_env.yml b/playbooks/roles/edxapp/tasks/python_sandbox_env.yml index 9d144ad709f..48c61450d37 100644 --- a/playbooks/roles/edxapp/tasks/python_sandbox_env.yml +++ b/playbooks/roles/edxapp/tasks/python_sandbox_env.yml @@ -1,57 +1,120 @@ +--- +# Set the alternatives this way for blas and lapack to work correctly for the +# MITx 6.341x course. 
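+# For orientation: each `alternatives` task below is roughly equivalent to a
+# manual update-alternatives invocation, e.g. on bionic (illustrative only):
+#
+#   update-alternatives --set libblas.so.3 /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.7.1
+#
+# The module simply makes that selection idempotent across runs.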
+- name: code sandbox | Check which `libblas` to use
+  stat:
+    path: /usr/lib/libblas/libblas.so.3gf
+  register: libblas_file
+
+- name: code sandbox | Use libblas.so.3gf in Ubuntu
+  alternatives:
+    name: libblas.so.3gf
+    path: /usr/lib/libblas/libblas.so.3gf
+  when: libblas_file.stat.exists
+
+- name: code sandbox | Use libblas.so.3 in Ubuntu
+  alternatives:
+    name: libblas.so.3
+    path: /usr/lib/libblas/libblas.so.3
+  when: not libblas_file.stat.exists and (ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal')
+
+- name: code sandbox | Use libblas.so.3 in Ubuntu
+  alternatives:
+    name: libblas.so.3
+    link: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3
+    path: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.7.1
+  when: ansible_distribution_release == 'bionic'
+
+- name: code sandbox | Use libblas.so.3 in Ubuntu
+  alternatives:
+    name: libblas.so.3
+    link: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3
+    path: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.9.0
+  when: ansible_distribution_release == 'focal'
+
+- name: code sandbox | Check which `liblapack` to use
+  stat:
+    path: /usr/lib/lapack/liblapack.so.3gf
+  register: liblapack_file
+
+- name: code sandbox | Use liblapack.so.3gf in Ubuntu
+  alternatives:
+    name: liblapack.so.3gf
+    path: /usr/lib/lapack/liblapack.so.3gf
+  when: liblapack_file.stat.exists
+
+- name: code sandbox | Use liblapack.so.3 in Ubuntu
+  alternatives:
+    name: liblapack.so.3
+    path: /usr/lib/lapack/liblapack.so.3
+  when: not liblapack_file.stat.exists and (ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal')
+
+- name: code sandbox | Use liblapack.so.3 in Ubuntu
+  alternatives:
+    name: liblapack.so.3
+    link: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3
+    path: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.7.1
+  when: ansible_distribution_release == 'bionic'
+
+- name: code sandbox | Use liblapack.so.3 in Ubuntu
+  alternatives:
+    name: liblapack.so.3
+    link: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3
+    path: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.9.0
+  when: ansible_distribution_release == 'focal'
+
 - name: code sandbox | Create edxapp sandbox user
-  user: name={{ edxapp_sandbox_user }} shell=/bin/false home={{ edxapp_sandbox_venv_dir }}
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
+  user:
+    name: "{{ edxapp_sandbox_user }}"
+    shell: /bin/false
+    home: "{{ edxapp_sandbox_venv_dir }}"
   tags:
-  - edxapp-sandbox
+    - edxapp-sandbox
+    - devstack

 - name: code sandbox | Install apparmor utils system pkg
-  apt: pkg=apparmor-utils state=present
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
+  apt:
+    name: apparmor-utils
+    state: present
   tags:
-  - edxapp-sandbox
+    - edxapp-sandbox

 - name: code sandbox | write out apparmor code sandbox config
-  template: src=code.sandbox.j2 dest=/etc/apparmor.d/code.sandbox mode=0644 owner=root group=root
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
+  template:
+    src: code.sandbox.j2
+    dest: /etc/apparmor.d/code.sandbox
+    mode: 0644
+    owner: root
+    group: root
   tags:
-  - edxapp-sandbox
+    - edxapp-sandbox

 - name: code sandbox | write out sandbox user sudoers config
-  template: src=95-sandbox-sudoer.j2 dest=/etc/sudoers.d/95-{{ edxapp_sandbox_user }} mode=0440 owner=root group=root validate='visudo -c -f %s'
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
+  template:
+    src: 95-sandbox-sudoer.j2
+    dest: "/etc/sudoers.d/95-{{ edxapp_sandbox_user }}"
+    mode: 0440
+    owner: root
+    group: root
+    validate: 'visudo -c -f %s'
   tags: 
-    - edxapp-sandbox
+    - edxapp-sandbox

-# we boostrap and enable the apparmor service here. in deploy.yml we disable, deploy, then re-enable
+# we bootstrap and enable the apparmor service here. In deploy.yml we disable, deploy, then re-enable
 # so we need to enable it in main.yml
 - name: code sandbox | start apparmor service
-  service: name=apparmor state=started
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
+  service:
+    name: apparmor
+    state: started
   tags:
-  - edxapp-sandbox
+    - edxapp-sandbox

 - name: code sandbox | (bootstrap) load code sandbox profile
   command: apparmor_parser -r /etc/apparmor.d/code.sandbox
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
   tags:
-  - edxapp-sandbox
+    - edxapp-sandbox

 - name: code sandbox | (bootstrap) put code sandbox into aa-enforce or aa-complain mode depending on EDXAPP_SANDBOX_ENFORCE
   command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
   tags:
-  - edxapp-sandbox
+    - edxapp-sandbox
diff --git a/playbooks/roles/edxapp/tasks/service_variant_config.yml b/playbooks/roles/edxapp/tasks/service_variant_config.yml
index 8dd59b386b0..9bc6e411509 100644
--- a/playbooks/roles/edxapp/tasks/service_variant_config.yml
+++ b/playbooks/roles/edxapp/tasks/service_variant_config.yml
@@ -1,118 +1,329 @@
-- name: "create {{ item }} application config"
-  template: >
-    src={{ item }}.env.json.j2
-    dest={{ edxapp_app_dir }}/{{ item }}.env.json
-  sudo_user: "{{ edxapp_user }}"
-  with_items: service_variants_enabled
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
-
-- name: "create {{ item }} auth file"
-  template: >
-    src={{ item }}.auth.json.j2
-    dest={{ edxapp_app_dir }}/{{ item }}.auth.json
-  sudo_user: "{{ edxapp_user }}"
-  notify:
-  - "restart edxapp"
-  - "restart edxapp_workers"
-  with_items: service_variants_enabled
+---
+- name: create application and auth config
+  template:
+    src: "{{ item[0] }}.{{ item[1] }}.json.j2"
+    dest: "{{ edxapp_app_dir }}/{{ item[0] }}.{{ item[1] }}.json"
+    owner: "{{ edxapp_user }}"
+    group: "{{ common_web_group }}"
+    mode: 0640
+  become: true
+  when: "EDXAPP_ENABLE_LEGACY_JSON_CONFIGS"
+  with_nested:
+    - "{{ service_variants_enabled }}"
+    - [ 'env', 'auth' ]
+  tags:
+    - install
+    - install:configuration
+    - install:app-configuration
+    - edxapp_cfg # Old deprecated tag, will remove when possible

-# write the supervisor scripts for the service variants
+- name: combined lms auth env for yml
+  set_fact:
+    lms_combined_config: '{{lms_env_config|combine(lms_auth_config)}}'
+  no_log: "{{ COMMON_CONFIG_NO_LOGGING }}"
+  when: '"lms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED'
+  tags:
+    - install
+    - install:configuration
+    - install:app-configuration
+    - edxapp_cfg # Old deprecated tag, will remove when possible
+    - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs

-- name: "writing {{ item }} supervisor script"
-  template: >
-    src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
-    owner={{ supervisor_user }}
-  with_items: service_variants_enabled
-  when: celery_worker is not defined and not devstack
-  sudo_user: "{{ supervisor_user }}"
+- name: render lms yml config # meant to replace existing json config eventually
+  template:
+    src: "lms.yml.j2"
+    dest: "{{ COMMON_CFG_DIR }}/lms.yml"
+    owner: "{{ edxapp_user }}"
+    group: "{{ common_web_group }}"
+    mode: 0640
+  become: true
+  no_log: "{{ COMMON_CONFIG_NO_LOGGING }}"
+  with_items: "{{ 
service_variants_enabled }}" + when: '"lms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs -- name: writing edxapp supervisor script - template: > - src=edxapp.conf.j2 dest={{ supervisor_cfg_dir }}/edxapp.conf - owner={{ supervisor_user }} - when: celery_worker is not defined and not devstack - sudo_user: "{{ supervisor_user }}" +- name: combined cms auth env for yml + set_fact: + cms_combined_config: '{{cms_env_config|combine(cms_auth_config)}}' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: '"cms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +- name: render studio yml config # meant to replace existing json config eventually + template: + src: "studio.yml.j2" + dest: "{{ COMMON_CFG_DIR }}/studio.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"cms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +- name: Install decrypt config private key from variable + local_action: + module: copy + content: "{{ DECRYPT_CONFIG_PRIVATE_KEY_VAR }}" + dest: "{{ DECRYPT_CONFIG_PRIVATE_KEY_PATH | default('/var/tmp') }}/private.key" + force: yes + mode: "0644" + become: false + no_log: True + when: EDXAPP_DECRYPT_CONFIG_ENABLED and DECRYPT_CONFIG_PRIVATE_KEY_VAR is defined + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Decrypt lms config + local_action: command asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path {{ ENCRYPTED_CFG_DIR }}/lms.yml --private_key_path {{ DECRYPT_CONFIG_PRIVATE_KEY }} --outfile_path {{ UNENCRYPTED_CFG_DIR }}/lms.yml + become: false + with_items: "{{ service_variants_enabled }}" + when: '"lms" in service_variants_enabled and EDXAPP_DECRYPT_CONFIG_ENABLED' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only + +- name: Decrypt cms config + local_action: command asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path {{ ENCRYPTED_CFG_DIR }}/studio.yml --private_key_path {{ DECRYPT_CONFIG_PRIVATE_KEY }} --outfile_path {{ UNENCRYPTED_CFG_DIR }}/studio.yml + become: false + with_items: "{{ service_variants_enabled }}" + when: '"cms" in service_variants_enabled and EDXAPP_DECRYPT_CONFIG_ENABLED' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Replace deploy host to sandbox dns name + replace: + path: "{{ UNENCRYPTED_CFG_DIR }}/{{ item }}.yml" + regexp: 
'deploy_host' + replace: "{{ COMMON_DEPLOY_HOSTNAME }}" + with_items: ['lms','studio'] + when: EDXAPP_DECRYPT_CONFIG_ENABLED and SANDBOX_CONFIG + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + become: false + delegate_to: localhost + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Copy lms config file + copy: + src: "{{ EDXAPP_LMS_LOCAL_CONFIG_FILE }}" + dest: "{{ COMMON_CFG_DIR }}/lms.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"lms" in service_variants_enabled and EDXAPP_COPY_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Copy cms config file + copy: + src: "{{ EDXAPP_CMS_LOCAL_CONFIG_FILE }}" + dest: "{{ COMMON_CFG_DIR }}/studio.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"cms" in service_variants_enabled and EDXAPP_COPY_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Write the revisions config file + template: + src: "revisions.yml.j2" + dest: "{{ edxapp_revision_cfg }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +# write the supervisor scripts for the service variants +- name: "writing {{ item }} supervisor script" + template: + src: "edx/app/supervisor/conf.d.available/{{ item }}.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ item }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_user }}" + mode: 0644 + become_user: "{{ supervisor_user }}" + with_items: "{{ service_variants_enabled }}" + tags: + - install + - install:configuration # write the supervisor script for celery workers +- name: writing celery supervisor scripts + template: + src: "edx/app/supervisor/conf.d.available/{{ item }}.j2" + dest: "{{ supervisor_available_dir }}/{{ item }}" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_user }}" + mode: 0644 + become_user: "{{ supervisor_user }}" + with_items: + - workers.conf + tags: + - install + - install:configuration -- name: writing celery worker supervisor script - template: > - src=workers.conf.j2 dest={{ supervisor_cfg_dir }}/workers.conf - owner={{ supervisor_user }} - when: celery_worker is defined and not devstack - sudo_user: "{{ supervisor_user }}" - -# Fake syncdb with migrate, only when fake_migrations is defined -# This overrides the database name to be the test database which -# the default application user has full write access to -- name: syncdb and migrate - shell: > - chdir={{ edxapp_code_dir }} - {{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate - when: fake_migrations is defined and migrate_db is defined and migrate_db|lower == "yes" - sudo_user: "{{ edxapp_user }}" - environment: - DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -# 
Regular syncdb with migrate -- name: syncdb and migrate - shell: > - chdir={{ edxapp_code_dir }} - {{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate - when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" - environment: - DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD }}" - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - -# Fake migrate, only when fake_migrations is defined -# This overrides the database name to be the test database which -# the default application user has full write access to -- name: db migrate - shell: > - chdir={{ edxapp_code_dir }} - {{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate - when: fake_migrations is defined and migrate_only is defined and migrate_only|lower == "yes" - sudo_user: "{{ edxapp_user }}" +# clean out an old edxapp.conf file which we don't use now. +# this can be deleted after we build things from scratch. +- name: clean out old edxapp.conf + file: + path: "{{ supervisor_available_dir }}/edxapp.conf" + state: "absent" + tags: + - install + - install:configuration + +- name: add gunicorn configuration files + template: + src: "{{ item }}_gunicorn.py.j2" + dest: "{{ edxapp_app_dir }}/{{ item }}_gunicorn.py" + mode: 0644 + become_user: "{{ edxapp_user }}" + with_items: "{{ service_variants_enabled }}" + tags: + - install + - install:configuration + +# Enable the supervisor jobs +- name: "enable {{ item }} supervisor script" + file: + src: "{{ supervisor_available_dir }}/{{ item }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ item }}.conf" + state: link + force: yes + become_user: "{{ supervisor_user }}" + with_items: "{{ service_variants_enabled }}" + when: celery_worker is not defined and not disable_edx_services + tags: + - install + - install:configuration + +- name: enable celery worker supervisor script + file: + src: "{{ supervisor_available_dir }}/workers.conf" + dest: "{{ supervisor_cfg_dir }}/workers.conf" + state: link + force: yes + become_user: "{{ supervisor_user }}" + when: celery_worker is defined and not disable_edx_services + tags: + - install + - install:configuration + +- name: create helper scripts for managing edxapp + template: + src: "edx/bin/{{ item[0] }}-{{ item[1] }}.j2" + dest: "{{ COMMON_BIN_DIR }}/{{ item[0] }}-{{ item[1] }}" + owner: "{{ edxapp_user }}" + mode: 0755 + with_nested: + - "{{ edxapp_helper_scripts }}" + - "{{ service_variants_enabled }}" + tags: + - install + - install:configuration + +- name: create script to compile and update assets + template: + src: "edx/bin/edxapp-update-assets.j2" + dest: "{{ COMMON_BIN_DIR }}/edxapp-update-assets" + owner: "{{ edxapp_user }}" + mode: 0755 + tags: + - install + - install:configuration + +# migrate when the migrate user is overridden in extra vars +- name: migrate + command: "{{ COMMON_BIN_DIR }}/edxapp-migrate-{{ item }}" + when: migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS and item != "lms-preview" + run_once: yes environment: - DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}" - notify: - - "restart edxapp" - - "restart edxapp_workers" - - - -# Regular migrations -- name: db migrate - shell: > - chdir={{ edxapp_code_dir }} - {{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate - when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes" - sudo_user: "{{ edxapp_user }}" - notify: - - "restart edxapp" - - 
"restart edxapp_workers" - - -# Gather assets using rake if possible - -- name: gather {{ item }} static assets with rake - shell: > - SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets:aws - executable=/bin/bash - chdir={{ edxapp_code_dir }} - sudo_user: "{{ edxapp_user }}" - when: celery_worker is not defined and not devstack and item != "lms-preview" - with_items: service_variants_enabled - notify: - - "restart edxapp" - - "restart edxapp_workers" - environment: "{{ edxapp_environment }}" + DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}" + DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}" + with_items: "{{ service_variants_enabled }}" + tags: + - migrate + +# There are problems with django collectstatic copying files. It doesn't retain +# last modified timestamps, but relies on those same timestamps to know if a new file +# should be recopied. While collectstatic --clear exists, it only clears some of the +# files in edxapp_staticfile_dir, it leaves postprocessed or otherwise hashed files. +# This ensures we have a totally clean directory. +- name: Remove and recreate the staticfiles directory so nothing stale can exist + file: + path: "{{ edxapp_staticfile_dir }}" + state: "{{ item }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: "0755" + when: celery_worker is not defined and not devstack + with_items: ['absent', 'directory'] + tags: + - gather_static_assets + - assets + +# Gather assets using paver if possible +- name: "gather static assets with paver and pull translations" + command: "{{ COMMON_BIN_DIR }}/edxapp-update-assets" + when: celery_worker is not defined and not devstack + tags: + - gather_static_assets + - assets +- name: Create or update SiteConfiguration + include: site_configuration.yml + when: celery_worker is not defined and EDXAPP_SITE_CONFIGURATION + with_items: "{{ EDXAPP_SITE_CONFIGURATION }}" + become_user: "{{ edxapp_user }}" + tags: + - create_or_update_site_configuration diff --git a/playbooks/roles/edxapp/tasks/site_configuration.yml b/playbooks/roles/edxapp/tasks/site_configuration.yml new file mode 100644 index 00000000000..a30c7de440b --- /dev/null +++ b/playbooks/roles/edxapp/tasks/site_configuration.yml @@ -0,0 +1,42 @@ +--- +- name: Create or update SiteConfiguration + block: + - name: Create the SiteConfiguration JSON file + template: + src: "site_configuration.json.j2" + dest: "/tmp/site_configuration.json" + + - name: Use the site_id if it is provided + set_fact: + site_identifier: "--site-id {{ item.site_id }}" + when: item.site_id is defined and item.domain is not defined + + - name: Use the domain name if it is provided + set_fact: + site_identifier: "{{ item.domain }}" + when: item.domain is defined and item.site_id is not defined + + - name: Fail if both site_id and domain are provided + fail: + msg: "Cannot specify the site_id and domain at the same time in {{ item }}" + when: item.domain is defined and item.site_id is defined + + - name: Get the default SITE_ID + shell: ". {{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py lms print_setting SITE_ID 2>/dev/null" + register: default_site_id + when: item.site_id is not defined and item.domain is not defined + + - name: Use the default SITE_ID as the site identifier + set_fact: + site_identifier: "--site-id {{ default_site_id.stdout }}" + when: item.site_id is not defined and item.domain is not defined + + - name: Run create_or_update_site_configuration + shell: | + . 
{{ edxapp_app_dir }}/edxapp_env + {{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py lms create_or_update_site_configuration -f /tmp/site_configuration.json --enabled {{ site_identifier }} + + - name: Remove the generated SiteConfiguration JSON file + file: + path: "/tmp/site_configuration.json" + state: absent diff --git a/playbooks/roles/edxapp/tasks/tag_ec2.yml b/playbooks/roles/edxapp/tasks/tag_ec2.yml new file mode 100644 index 00000000000..5d3b22a82ee --- /dev/null +++ b/playbooks/roles/edxapp/tasks/tag_ec2.yml @@ -0,0 +1,19 @@ +--- +- name: get instance information + action: ec2_metadata_facts + +- name: tag instance with edx_platform version + ec2_tag: + resource: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + tags: + "version:edx_platform" : "{{ edx_platform_repo }} {{ edxapp_platform_checkout.after }}" + when: edxapp_platform_checkout.after is defined + +- name: tag instance with edxapp theme version + ec2_tag: + resource: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + tags: + "version:edxapp_theme" : "{{ edxapp_theme_source_repo }} {{ edxapp_theme_checkout.after }}" + when: edxapp_theme_checkout.after is defined diff --git a/playbooks/roles/edxapp/templates/.npmrc.j2 b/playbooks/roles/edxapp/templates/.npmrc.j2 new file mode 100644 index 00000000000..70e815aa876 --- /dev/null +++ b/playbooks/roles/edxapp/templates/.npmrc.j2 @@ -0,0 +1,2 @@ +registry={{ COMMON_NPM_MIRROR_URL }} +prefix={{ edxapp_npm_dir }} diff --git a/playbooks/roles/edxapp/templates/95-sandbox-sudoer.j2 b/playbooks/roles/edxapp/templates/95-sandbox-sudoer.j2 index c0a79dbfa4f..b291add83a0 100644 --- a/playbooks/roles/edxapp/templates/95-sandbox-sudoer.j2 +++ b/playbooks/roles/edxapp/templates/95-sandbox-sudoer.j2 @@ -1,3 +1,11 @@ +{% if devstack %} {{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python +{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find /tmp/codejail-*/tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} ; {{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill {{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill +{% else %} +{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python +{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find /tmp/codejail-*/tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} ; +{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill +{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill +{% endif %} diff --git a/playbooks/roles/edxapp/templates/boto.j2 b/playbooks/roles/edxapp/templates/boto.j2 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/playbooks/roles/edxapp/templates/cms.auth.json.j2 b/playbooks/roles/edxapp/templates/cms.auth.json.j2 index 6f1859f1683..e0f1351d395 100644 --- a/playbooks/roles/edxapp/templates/cms.auth.json.j2 +++ b/playbooks/roles/edxapp/templates/cms.auth.json.j2 @@ -1,2 +1,2 @@ -{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %} +{% do cms_auth_config.update(EDXAPP_CMS_AUTH_EXTRA) %} {{ cms_auth_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/cms.conf.j2 b/playbooks/roles/edxapp/templates/cms.conf.j2 deleted file mode 100644 index 18873b8ac6e..00000000000 --- a/playbooks/roles/edxapp/templates/cms.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:cms] -{% if ansible_processor|length > 0 %} -command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ 
edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi -{% else %} -command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi -{% endif %} - -user={{ common_web_user }} -directory={{ edxapp_code_dir }} -environment=PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms" -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/edxapp/templates/cms.env.json.j2 b/playbooks/roles/edxapp/templates/cms.env.json.j2 index f24a6d9dcbb..3522b735946 100644 --- a/playbooks/roles/edxapp/templates/cms.env.json.j2 +++ b/playbooks/roles/edxapp/templates/cms.env.json.j2 @@ -1,2 +1,2 @@ -{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %} +{% do cms_env_config.update(EDXAPP_CMS_ENV_EXTRA) %} {{ cms_env_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 b/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 new file mode 100644 index 00000000000..8683ad6d20d --- /dev/null +++ b/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 @@ -0,0 +1,31 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/stable/configure.html + +{{ ansible_managed }} +""" +import multiprocessing + +preload_app = False +timeout = {{ EDXAPP_CMS_GUNICORN_TIMEOUT }} +bind = "{{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }}" +pythonpath = "{{ edxapp_code_dir }}" +limit_request_field_size = 16384 + +{% if EDXAPP_CMS_MAX_REQ -%} +max_requests = {{ EDXAPP_CMS_MAX_REQ }} +{% endif -%} + +{% if EDXAPP_WORKERS %} +workers = {{ EDXAPP_WORKERS.cms }} +{% else %} +workers = (multiprocessing.cpu_count()-1) * {{ worker_core_mult.cms }} + {{ worker_core_mult.cms }} +{% endif %} + +{{ common_pre_request }} + +{{ common_close_all_caches }} + +def post_fork(server, worker): + close_all_caches() + +{{ EDXAPP_CMS_GUNICORN_EXTRA_CONF }} diff --git a/playbooks/roles/edxapp/templates/code.sandbox.j2 b/playbooks/roles/edxapp/templates/code.sandbox.j2 index 71e9e10a112..719b94db2bd 100644 --- a/playbooks/roles/edxapp/templates/code.sandbox.j2 +++ b/playbooks/roles/edxapp/templates/code.sandbox.j2 @@ -1,24 +1,41 @@ #include -{{ edxapp_sandbox_venv_dir }}/bin/python flags=(complain) { +{{ edxapp_sandbox_venv_dir }}/bin/* { #include {{ edxapp_sandbox_venv_dir }}/** mr, - {{ edxapp_code_dir }}/common/lib/sandbox-packages/** r, /tmp/codejail-*/ rix, - /tmp/codejail-*/** rix, + /tmp/codejail-*/** wrix, # - # Whitelist particiclar shared objects from the system + # Whitelist particular shared objects from the system # python installation # - /usr/lib/python2.7/lib-dynload/_json.so mr, - /usr/lib/python2.7/lib-dynload/_ctypes.so mr, - /usr/lib/python2.7/lib-dynload/_heapq.so mr, - /usr/lib/python2.7/lib-dynload/_io.so mr, - /usr/lib/python2.7/lib-dynload/_csv.so mr, - /usr/lib/python2.7/lib-dynload/datetime.so mr, - /usr/lib/python2.7/lib-dynload/_elementtree.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_json.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_ctypes.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_heapq.so mr, + /usr/lib/{{ edxapp_sandbox_python_version 
}}/lib-dynload/_io.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_csv.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/datetime.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_elementtree.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/pyexpat.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/future_builtins.so mr, + + # Matplot lib needs a place for temp caches + {{ edxapp_sandbox_venv_dir }}/.config/ wrix, + {{ edxapp_sandbox_venv_dir }}/.cache/ wrix, + {{ edxapp_sandbox_venv_dir }}/.config/** wrix, + {{ edxapp_sandbox_venv_dir }}/.cache/** wrix, + + # Matplotlib related libraries + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/termios.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/parser.so mr, + + # Matplot lib needs fonts to make graphs + /usr/share/fonts/ r, + /usr/share/fonts/** r, + /usr/local/share/fonts/ r, + /usr/local/share/fonts/** r, # # Allow access to selections from /proc diff --git a/playbooks/roles/edxapp/templates/course.xml.j2 b/playbooks/roles/edxapp/templates/course.xml.j2 new file mode 100644 index 00000000000..ec9aef96048 --- /dev/null +++ b/playbooks/roles/edxapp/templates/course.xml.j2 @@ -0,0 +1 @@ + diff --git a/playbooks/roles/edxapp/templates/devstack.sh.j2 b/playbooks/roles/edxapp/templates/devstack.sh.j2 new file mode 100644 index 00000000000..8e0cffbf9b8 --- /dev/null +++ b/playbooks/roles/edxapp/templates/devstack.sh.j2 @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ edxapp_app_dir }}/edxapp_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ edxapp_venv_bin }}/activate + cd {{ edxapp_code_dir }} + + /bin/bash + ;; + exec) + shift + + . 
{{ edxapp_venv_bin }}/activate + cd {{ edxapp_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/beat_scheduler.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/beat_scheduler.sh.j2 new file mode 100644 index 00000000000..88d764c41f5 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/beat_scheduler.sh.j2 @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edxapp_venv_bin = edxapp_venv_dir + "/bin" %} +source {{ edxapp_app_dir }}/edxapp_env +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + edxapp_venv_bin + '/single-beat ' + edxapp_venv_bin + '/celery' %} + +export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +{% else %} +{% set executable = edxapp_venv_bin + '/single-beat ' + edxapp_venv_bin + '/celery' %} +{% endif %} + +# We exec so that celery is the child of supervisor and can be managed properly + +exec {{ executable }} --config="${SERVICE_CONFIG}" beat $@ diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 new file mode 100644 index 00000000000..e92a73db9e1 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edxapp_venv_bin = edxapp_venv_dir + "/bin" %} + +{% set executable = edxapp_venv_bin + '/gunicorn' %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + executable %} + +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ EDXAPP_CMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" +export NEW_RELIC_APP_NAME="{{ EDXAPP_NEWRELIC_CMS_APPNAME }}" +export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = edxapp_venv_bin + '/ddtrace-run ' + executable %} +export DD_TAGS="service:edxapp-cms" +export DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true +# Copied from edx_django_service playbook for consistency; Datadog +# trace debug logging issue doesn't actually affect edxapp for some +# reason. 
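+# DD_TRACE_LOG_STREAM_HANDLER=false keeps ddtrace from attaching its own log
+# stream handler, so tracer debug output stays out of the service logs.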
+export DD_TRACE_LOG_STREAM_HANDLER=false +# Datadog's instrumentation breaks pymongo: https://github.com/edx/edx-arch-experiments/issues/580 +export DD_TRACE_PYMONGO_ENABLED=false +{% endif -%} + +export PORT="{{ edxapp_cms_gunicorn_port }}" +export ADDRESS="{{ edxapp_cms_gunicorn_host }}" +export LANG="{{ EDXAPP_LANG }}" +export DJANGO_SETTINGS_MODULE="{{ EDXAPP_CMS_ENV }}" +export SERVICE_VARIANT="cms" +export PATH="{{ edxapp_deploy_path }}" +export BOTO_CONFIG="{{ edxapp_app_dir }}/.boto" +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-studio" + +source {{ edxapp_app_dir }}/edxapp_env +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} -c {{ edxapp_app_dir }}/cms_gunicorn.py {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 new file mode 100644 index 00000000000..e514b406148 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edxapp_venv_bin = edxapp_venv_dir + "/bin" %} + +{% set executable = edxapp_venv_bin + '/gunicorn' %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + executable %} + +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ EDXAPP_LMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" +export NEW_RELIC_APP_NAME="{{ EDXAPP_NEWRELIC_LMS_APPNAME }}" +export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = edxapp_venv_bin + '/ddtrace-run ' + executable %} +export DD_TAGS="service:edxapp-lms" +export DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true +# Copied from edx_django_service playbook for consistency; Datadog +# trace debug logging issue doesn't actually affect edxapp for some +# reason. 
+export DD_TRACE_LOG_STREAM_HANDLER=false
+# Datadog's instrumentation breaks pymongo: https://github.com/edx/edx-arch-experiments/issues/580
+export DD_TRACE_PYMONGO_ENABLED=false
+{% endif -%}
+
+export PORT="{{ edxapp_lms_gunicorn_port }}"
+export ADDRESS="{{ edxapp_lms_gunicorn_host }}"
+export LANG="{{ EDXAPP_LANG }}"
+export DJANGO_SETTINGS_MODULE="{{ EDXAPP_LMS_ENV }}"
+export SERVICE_VARIANT="lms"
+export PATH="{{ edxapp_deploy_path }}"
+export BOTO_CONFIG="{{ edxapp_app_dir }}/.boto"
+export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-lms"
+
+source {{ edxapp_app_dir }}/edxapp_env
+# We exec so that gunicorn is the child of supervisor and can be managed properly
+exec {{ executable }} -c {{ edxapp_app_dir }}/lms_gunicorn.py lms.wsgi
diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_cms_config.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_cms_config.sh.j2
new file mode 100644
index 00000000000..462e66e2804
--- /dev/null
+++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_cms_config.sh.j2
@@ -0,0 +1,13 @@
+#jinja2:trim_blocks: False
+{# Have to disable trim blocks or else fi at end of file is not on its own line #}
+#!/bin/bash
+
+# Reload Studio gunicorn if this machine has the Studio frontend
+if [ -e '/edx/app/supervisor/conf.d/cms.conf' ]; then
+    /edx/bin/supervisorctl signal HUP cms
+fi
+
+# Reload Studio workers if this machine has workers
+if [ -e '/edx/app/supervisor/conf.d/workers.conf' ]; then
+    /edx/bin/supervisorctl signal HUP {% for w in edxapp_workers %}{% if w.service_variant == 'cms' %}edxapp_worker:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}{% if not loop.last %} {% endif %}{% endif %}{% endfor %}
+fi
diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_lms_config.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_lms_config.sh.j2
new file mode 100644
index 00000000000..f2cee821358
--- /dev/null
+++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_lms_config.sh.j2
@@ -0,0 +1,13 @@
+#jinja2:trim_blocks: False
+{# Have to disable trim blocks or else fi at end of file is not on its own line #}
+#!/bin/bash
+
+# Reload LMS gunicorn if this machine has the LMS frontend
+if [ -e '/edx/app/supervisor/conf.d/lms.conf' ]; then
+    /edx/bin/supervisorctl signal HUP lms
+fi
+
+# Reload LMS workers if this machine has workers
+if [ -e '/edx/app/supervisor/conf.d/workers.conf' ]; then
+    /edx/bin/supervisorctl signal HUP {% for w in edxapp_workers %}{% if w.service_variant == 'lms' %}edxapp_worker:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}{% if not loop.last %} {% endif %}{% endif %}{% endfor %}
+fi
diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2
new file mode 100644
index 00000000000..ccd903b8b5b
--- /dev/null
+++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+# {{ ansible_managed }}
+
+{% set edxapp_venv_bin = edxapp_venv_dir + "/bin" %}
+source {{ edxapp_app_dir }}/edxapp_env
+{% if COMMON_ENABLE_NEWRELIC_APP %}
+{% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + edxapp_venv_bin + '/celery' %}
+
+export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini"
+if command -v ec2metadata >/dev/null 2>&1; then
+    INSTANCEID=$(ec2metadata --instance-id);
+    HOSTNAME=$(hostname)
+    export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID"
+fi
+{% else %}
+{% set executable = 
edxapp_venv_bin + '/celery' %} +{% endif %} + +# We exec so that celery is the child of supervisor and can be managed properly + +exec {{ executable }} $@ diff --git a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/cms.conf.j2 b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/cms.conf.j2 new file mode 100644 index 00000000000..5aeb626416f --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/cms.conf.j2 @@ -0,0 +1,11 @@ +[program:cms] + + +command={{ edxapp_app_dir }}/cms.sh + +user={{ common_web_user }} +directory={{ edxapp_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/lms.conf.j2 b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/lms.conf.j2 new file mode 100644 index 00000000000..aa468ec1edc --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/lms.conf.j2 @@ -0,0 +1,10 @@ +[program:lms] + +command={{ edxapp_app_dir }}/lms.sh + +user={{ common_web_user }} +directory={{ edxapp_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 new file mode 100644 index 00000000000..3dd9641c9a7 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 @@ -0,0 +1,59 @@ +{% for w in edxapp_workers %} +[program:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}] + +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }}-{{ w.service_variant }},NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ EDXAPP_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ w.service_variant }}.envs.{{ worker_django_settings_module }},LANG={{ EDXAPP_LANG }},PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }},BOTO_CONFIG="{{ edxapp_app_dir }}/.boto",EDX_REST_API_CLIENT_NAME=edx.{{ w.service_variant }}.core.{{ w.queue }} +user={{ common_web_user }} +directory={{ edxapp_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log + +command={{ edxapp_app_dir }}/worker.sh --app={{ w.service_variant }}.celery:APP worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--max-tasks-per-child ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '--without-heartbeat' if not EDXAPP_CELERY_HEARTBEAT_ENABLED|bool else '' }} {{ '-O ' + w.prefetch_optimization if w.prefetch_optimization is defined else '' }} +killasgroup=true +stopwaitsecs={{ w.stopwaitsecs | default(EDXAPP_WORKER_DEFAULT_STOPWAITSECS) }} + +{% endfor %} + +[group:edxapp_worker] +programs={%- for w in edxapp_workers %}{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}{%- if not loop.last %},{%- endif %}{%- endfor %} + + +{% if 
EDXAPP_ENABLE_CELERY_BEAT|bool and EDXAPP_CELERY_BROKER_TRANSPORT == "redis" %}
+[program:celerybeat_scheduler]
+environment=
+    {% if COMMON_ENABLE_NEWRELIC_APP %}
+    NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }}-lms,
+    NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ EDXAPP_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }},
+    NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},
+    {% endif -%}
+    LANG={{ EDXAPP_LANG }},
+    PYTHONPATH={{ edxapp_code_dir }},
+    SERVICE_VARIANT=lms,
+    BOTO_CONFIG="{{ edxapp_app_dir }}/.boto",
+    EDX_REST_API_CLIENT_NAME=edx.lms.core.default,
+    SINGLE_BEAT_LOCK_TIME={{ EDXAPP_SINGLE_BEAT_LOCK_TIME }},
+    SINGLE_BEAT_HEARTBEAT_INTERVAL={{ EDXAPP_SINGLE_BEAT_HEARTBEAT_INTERVAL }},
+    SINGLE_BEAT_IDENTIFIER="{{ EDXAPP_SINGLE_BEAT_IDENTIFIER }}",
+    SINGLE_BEAT_REDIS_SERVER="redis://{{ EDXAPP_SINGLE_BEAT_USER }}:{{ EDXAPP_SINGLE_BEAT_PASSWORD }}@{{ EDXAPP_CELERY_BROKER_HOSTNAME }}/{{ EDXAPP_CELERY_BROKER_VHOST }}",
+    SINGLE_BEAT_WAIT_MODE="supervised",
+    SERVICE_CONFIG="lms.envs.{{ worker_django_settings_module }}"
+user={{ common_web_user }}
+directory={{ edxapp_code_dir }}
+stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
+stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
+
+command={{ edxapp_app_dir }}/beat_scheduler.sh --loglevel=info --schedule="{{ supervisor_log_dir }}/celerybeat-schedule" --pidfile="{{ supervisor_log_dir }}/celerybeat.pid"
+
+killasgroup=true
+stopwaitsecs={{ EDXAPP_WORKER_DEFAULT_STOPWAITSECS }}
+
+# If multiple app servers are running, celery-beat will exit on the new app
+# server because it is already running on the old one. Once the process has
+# exited with status 0, it won't restart again even after the old server is
+# deprovisioned. To resolve this conflict, use `autorestart=true` to keep
+# trying to start the process. startretries defaults to 3; since there is no
+# way to set infinite retries, the recommended approach is to set a high
+# number. When the previous server is stopped, celery-beat will start
+# normally. 
+autorestart=true +startsecs=30 +startretries=10000 +{% endif %} diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-migrate-cms.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-migrate-cms.j2 new file mode 100644 index 00000000000..57b4d23d1f5 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-migrate-cms.j2 @@ -0,0 +1,28 @@ +{% include "edxapp_common.j2" %} + +if [[ -z "${NO_EDXAPP_SUDO:-}" ]]; then + SUDO='sudo -E -u {{ edxapp_user }} env "PATH=$PATH"' +fi + +remove_unwanted_args () { + ARGS=("") + args_to_remove="(--list|--noinput)" + for var in "$@"; do + # Ignore known unneeded arguments + if [[ "$var" =~ $args_to_remove ]]; then + continue + fi + ARGS+=("$var") + done +} + +{% for db in cms_auth_config.DATABASES.keys() %} + {%- if db != 'read_replica' %} +if [[ $@ =~ .*--list.* ]]; then + remove_unwanted_args $@ + ${SUDO:-} {{ edxapp_venv_bin }}/python manage.py cms showmigrations --database {{ db }} --settings $EDX_PLATFORM_SETTINGS ${ARGS[@]} +else + ${SUDO:-} {{ edxapp_venv_bin }}/python manage.py cms migrate --database {{ db }} --noinput --settings $EDX_PLATFORM_SETTINGS $@ +fi + {% endif %} +{% endfor %} diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-migrate-lms.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-migrate-lms.j2 new file mode 100644 index 00000000000..c3222799b09 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-migrate-lms.j2 @@ -0,0 +1,28 @@ +{% include "edxapp_common.j2" %} + +if [[ -z "${NO_EDXAPP_SUDO:-}" ]]; then + SUDO='sudo -E -u {{ edxapp_user }} env "PATH=$PATH"' +fi + +remove_unwanted_args () { + ARGS=("") + args_to_remove="(--list|--noinput)" + for var in "$@"; do + # Ignore known unneeded arguments + if [[ "$var" =~ $args_to_remove ]]; then + continue + fi + ARGS+=("$var") + done +} + +{% for db in lms_auth_config.DATABASES.keys() %} + {%- if db != 'read_replica' %} +if [[ $@ =~ .*--list.* ]]; then + remove_unwanted_args $@ + ${SUDO:-} {{ edxapp_venv_bin }}/python manage.py lms showmigrations --database {{ db }} --settings $EDX_PLATFORM_SETTINGS ${ARGS[@]} +else + ${SUDO:-} {{ edxapp_venv_bin }}/python manage.py lms migrate --database {{ db }} --noinput --settings $EDX_PLATFORM_SETTINGS $@ +fi + {% endif %} +{% endfor %} diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-runserver-cms.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-runserver-cms.j2 new file mode 100644 index 00000000000..cea2acde52d --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-runserver-cms.j2 @@ -0,0 +1,3 @@ +{% include "edxapp_common.j2" %} + +sudo -E -u {{ edxapp_user }} env "PATH=$PATH" {{ edxapp_venv_bin}}/python manage.py cms runserver {{ edxapp_cms_gunicorn_port }} --settings $EDX_PLATFORM_SETTINGS diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-runserver-lms.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-runserver-lms.j2 new file mode 100644 index 00000000000..e90545b21c5 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-runserver-lms.j2 @@ -0,0 +1,3 @@ +{% include "edxapp_common.j2" %} + +sudo -E -u {{ edxapp_user }} env "PATH=$PATH" {{ edxapp_venv_bin}}/python manage.py lms runserver {{ edxapp_lms_gunicorn_port }} --settings $EDX_PLATFORM_SETTINGS diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-shell-cms.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-shell-cms.j2 new file mode 100644 index 00000000000..126d31b39c2 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-shell-cms.j2 @@ -0,0 +1,3 @@ +{% 
include "edxapp_common.j2" %} + +sudo -E -u {{ edxapp_user }} env "PATH=$PATH" {{ edxapp_venv_bin}}/python manage.py cms shell --settings $EDX_PLATFORM_SETTINGS diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-shell-lms.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-shell-lms.j2 new file mode 100644 index 00000000000..563075969b8 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-shell-lms.j2 @@ -0,0 +1,3 @@ +{% include "edxapp_common.j2" %} + +sudo -E -u {{ edxapp_user }} env "PATH=$PATH" {{ edxapp_venv_bin}}/python manage.py lms shell --settings $EDX_PLATFORM_SETTINGS diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 new file mode 100644 index 00000000000..fe3dc8fc89d --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 @@ -0,0 +1,13 @@ +{% include "edxapp_common.j2" %} + +{% if edxapp_staticfiles_storage_overrides %} +{% for override in edxapp_staticfiles_storage_overrides %} +sudo -E -H -u {{ edxapp_user }} \ + env "PATH=$PATH" "STATICFILES_STORAGE={{ override }}" \ + {{ edxapp_venv_bin }}/paver update_assets --debug-collect --settings=$EDX_PLATFORM_SETTINGS +{% endfor %} +{% else %} +sudo -E -H -u {{ edxapp_user }} \ + env "PATH=$PATH" \ + {{ edxapp_venv_bin }}/paver update_assets --debug-collect --settings $EDX_PLATFORM_SETTINGS +{% endif %} diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp_common.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp_common.j2 new file mode 100644 index 00000000000..ef10813fd3c --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp_common.j2 @@ -0,0 +1,15 @@ +#!/bin/bash + +# Error out when any command fails. For the migration scripts migrating multiple +# databases this ensure migration errors for any database will be seen by Ansible. 
+set -euo pipefail + +cd {{ edxapp_code_dir }} +source {{ edxapp_app_dir }}/edxapp_env + +# The default settings set in edxapp_env can be overridden +# using the var $EDX_PLATFORM_SETTINGS_OVERRIDE + +if [[ -n "${EDX_PLATFORM_SETTINGS_OVERRIDE:-}" ]]; then + export EDX_PLATFORM_SETTINGS="$EDX_PLATFORM_SETTINGS_OVERRIDE" +fi diff --git a/playbooks/roles/edxapp/templates/edxapp.conf.j2 b/playbooks/roles/edxapp/templates/edxapp.conf.j2 deleted file mode 100644 index 9debab9fd93..00000000000 --- a/playbooks/roles/edxapp/templates/edxapp.conf.j2 +++ /dev/null @@ -1,2 +0,0 @@ -[group:edxapp] -programs={{ ",".join(service_variants_enabled) }} diff --git a/playbooks/roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2 b/playbooks/roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2 deleted file mode 100644 index a3ca57b8aa6..00000000000 --- a/playbooks/roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2 +++ /dev/null @@ -1,5 +0,0 @@ -automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py migrate * -automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py seed_permissions_roles * -automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py set_staff * -automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py transfer_students * - diff --git a/playbooks/roles/edxapp/templates/git_ssh.sh.j2 b/playbooks/roles/edxapp/templates/git_ssh.sh.j2 new file mode 100644 index 00000000000..230fa8646db --- /dev/null +++ b/playbooks/roles/edxapp/templates/git_ssh.sh.j2 @@ -0,0 +1,6 @@ +#!/bin/sh +{% if EDXAPP_USE_GIT_IDENTITY %} +exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ edxapp_git_identity }} "$@" +{% else %} +exec /usr/bin/ssh -o StrictHostKeyChecking=no "$@" +{% endif %} \ No newline at end of file diff --git a/playbooks/roles/edxapp/templates/git_ssh_auth.sh.j2 b/playbooks/roles/edxapp/templates/git_ssh_auth.sh.j2 deleted file mode 100644 index a1dbe93ae48..00000000000 --- a/playbooks/roles/edxapp/templates/git_ssh_auth.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ edxapp_git_identity }} "$@" diff --git a/playbooks/roles/edxapp/templates/git_ssh_noauth.sh.j2 b/playbooks/roles/edxapp/templates/git_ssh_noauth.sh.j2 deleted file mode 100644 index e30af2deeb1..00000000000 --- a/playbooks/roles/edxapp/templates/git_ssh_noauth.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no "$@" diff --git a/playbooks/roles/edxapp/templates/lms-preview.auth.json.j2 b/playbooks/roles/edxapp/templates/lms-preview.auth.json.j2 deleted file mode 100644 index 833b8d835b2..00000000000 --- a/playbooks/roles/edxapp/templates/lms-preview.auth.json.j2 +++ /dev/null @@ -1,2 +0,0 @@ -{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %} -{{ lms_preview_auth_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/lms-preview.conf.j2 b/playbooks/roles/edxapp/templates/lms-preview.conf.j2 deleted file mode 100644 index 3d863567303..00000000000 --- a/playbooks/roles/edxapp/templates/lms-preview.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:lms-preview] -{% if ansible_processor|length > 0 %} -command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir 
}} lms.wsgi -{% else %} -command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi -{% endif %} - -user={{ common_web_user }} -directory={{ edxapp_code_dir }} -environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms-preview" -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/edxapp/templates/lms-preview.env.json.j2 b/playbooks/roles/edxapp/templates/lms-preview.env.json.j2 deleted file mode 100644 index 959b19ce4ae..00000000000 --- a/playbooks/roles/edxapp/templates/lms-preview.env.json.j2 +++ /dev/null @@ -1,2 +0,0 @@ -{% do lms_preview_env_config.update(EDXAPP_ENV_EXTRA) %} -{{ lms_preview_env_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/lms.auth.json.j2 b/playbooks/roles/edxapp/templates/lms.auth.json.j2 index 3b547a531d6..c43ef92abdd 100644 --- a/playbooks/roles/edxapp/templates/lms.auth.json.j2 +++ b/playbooks/roles/edxapp/templates/lms.auth.json.j2 @@ -1,2 +1,2 @@ -{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %} +{% do lms_auth_config.update(EDXAPP_LMS_AUTH_EXTRA) %} {{ lms_auth_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/lms.conf.j2 b/playbooks/roles/edxapp/templates/lms.conf.j2 deleted file mode 100644 index 9b5a836b6d3..00000000000 --- a/playbooks/roles/edxapp/templates/lms.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:lms] -{% if ansible_processor|length > 0 %} -command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi -{% else %} -command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi -{% endif %} - -user={{ common_web_user }} -directory={{ edxapp_code_dir }} -environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms" -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/edxapp/templates/lms.env.json.j2 b/playbooks/roles/edxapp/templates/lms.env.json.j2 index efdadf3cdcb..7f4fbeb0449 100644 --- a/playbooks/roles/edxapp/templates/lms.env.json.j2 +++ b/playbooks/roles/edxapp/templates/lms.env.json.j2 @@ -1,2 +1,2 @@ -{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %} +{% do lms_env_config.update(EDXAPP_LMS_ENV_EXTRA) %} {{ lms_env_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/lms.yml.j2 b/playbooks/roles/edxapp/templates/lms.yml.j2 new file mode 100644 index 00000000000..8f9b569db83 --- /dev/null +++ b/playbooks/roles/edxapp/templates/lms.yml.j2 @@ -0,0 +1,4 @@ +{% if lms_combined_config %} +{% do lms_combined_config.update(EDXAPP_LMS_ENV_EXTRA) %} +{{ lms_combined_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 
b/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 new file mode 100644 index 00000000000..cf1822ac2e6 --- /dev/null +++ b/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 @@ -0,0 +1,31 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/stable/configure.html + +{{ ansible_managed }} +""" +import multiprocessing + +preload_app = False +timeout = {{ EDXAPP_LMS_GUNICORN_TIMEOUT }} +bind = "{{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }}" +pythonpath = "{{ edxapp_code_dir }}" +limit_request_field_size = 16384 + +{% if EDXAPP_LMS_MAX_REQ -%} +max_requests = {{ EDXAPP_LMS_MAX_REQ }} +{% endif -%} + +{% if EDXAPP_WORKERS %} +workers = {{ EDXAPP_WORKERS.lms }} +{% else %} +workers = (multiprocessing.cpu_count()-1) * {{ worker_core_mult.lms }} + {{ worker_core_mult.lms }} +{% endif %} + +{{ common_pre_request }} + +{{ common_close_all_caches }} + +def post_fork(server, worker): + close_all_caches() + +{{ EDXAPP_LMS_GUNICORN_EXTRA_CONF }} diff --git a/playbooks/roles/edxapp/templates/newrelic.ini.j2 b/playbooks/roles/edxapp/templates/newrelic.ini.j2 new file mode 100644 index 00000000000..26bb86bf2a3 --- /dev/null +++ b/playbooks/roles/edxapp/templates/newrelic.ini.j2 @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------------- +# edX Note +# +# New Relic allows us to specify the Python Agent config options in a number of +# places. For most functionality, you can get away with a combination of +# environment variables (see the lms.conf.j2 in the configuration repo) and New +# Relic's server-side configuration (the "settings" on your app in their +# monitoring web interface). That being said, some advanced settings can only +# be set in this file or manually in code. That's why this config file is so +# bare -- it specifies only what you *can't* specify in the other two places. +# +# Note that you can always get an up to date snapshot of your combined config +# by going to New Relic's web interface and selecting your app's: +# Settings > Environment > Agent Initialization + +[newrelic] + +# Turns on or turns off all attributes for browser monitoring. This is the data +# that gets sent to the PageView destination in New Relic Insights. If +# attributes.enabled is false, no attributes will be sent to browser monitoring +# regardless of how this configuration setting is set. +# +# Enabling browser_monitoring.attributes.enabled=true means that we can add a +# custom parameter in our Python code via newrelic.agent.add_custom_parameter() +# and have that show up in PageViews (by default, it only shows up in +# Transactions). This lets us do things like facet front end load time by +# `course_id`. 
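+#
+# For example (a sketch; add_custom_parameter is the agent call named above,
+# and the attribute name is illustrative):
+#
+#   import newrelic.agent
+#   newrelic.agent.add_custom_parameter('course_id', course_id)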
+# +browser_monitoring.attributes.enabled=true diff --git a/playbooks/roles/edxapp/templates/revisions.yml.j2 b/playbooks/roles/edxapp/templates/revisions.yml.j2 new file mode 100644 index 00000000000..043bac70c21 --- /dev/null +++ b/playbooks/roles/edxapp/templates/revisions.yml.j2 @@ -0,0 +1,3 @@ +{% if edxapp_revisions_config %} +{{ edxapp_revisions_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/edxapp/templates/site_configuration.json.j2 b/playbooks/roles/edxapp/templates/site_configuration.json.j2 new file mode 100644 index 00000000000..2fae98836c8 --- /dev/null +++ b/playbooks/roles/edxapp/templates/site_configuration.json.j2 @@ -0,0 +1 @@ +{{ item['values'] | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/studio.yml.j2 b/playbooks/roles/edxapp/templates/studio.yml.j2 new file mode 100644 index 00000000000..08f5c79cd08 --- /dev/null +++ b/playbooks/roles/edxapp/templates/studio.yml.j2 @@ -0,0 +1,4 @@ +{% if cms_combined_config %} +{% do cms_combined_config.update(EDXAPP_CMS_ENV_EXTRA) %} +{{ cms_combined_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/edxapp/templates/workers.conf.j2 b/playbooks/roles/edxapp/templates/workers.conf.j2 deleted file mode 100644 index 846689607e2..00000000000 --- a/playbooks/roles/edxapp/templates/workers.conf.j2 +++ /dev/null @@ -1,17 +0,0 @@ -{% for w in edxapp_workers %} -[program:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}] - -environment=CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE=aws,PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }} -user={{ common_web_user }} -directory={{ edxapp_code_dir }} -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log - -command={{ edxapp_venv_bin}}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.{{ ansible_hostname }} --concurrency={{ w.concurrency }} -killasgroup=true -stopasgroup=true - -{% endfor %} - -[group:edxapp_worker] -programs={%- for w in edxapp_workers %}{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}{%- if not loop.last %},{%- endif %}{%- endfor %} diff --git a/playbooks/roles/edxapp/vars/devstack.yml b/playbooks/roles/edxapp/vars/devstack.yml new file mode 100644 index 00000000000..a840c33ad12 --- /dev/null +++ b/playbooks/roles/edxapp/vars/devstack.yml @@ -0,0 +1,9 @@ +--- +# The only difference between these requirements and the role defaults is the +# use of "development.txt" instead of "base.txt". This set of requirements +# should not be used in production. 
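+#
+# For comparison, the role-default equivalent would point at base.txt
+# (a sketch mirroring the variables below, not copied from the role):
+#
+#   edxapp_requirements_files:
+#     - "{{ custom_requirements_file }}"
+#     - "{{ edxapp_code_dir }}/requirements/edx/base.txt"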
+development_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/development.txt"
+
+edxapp_requirements_files:
+  - "{{ custom_requirements_file }}"
+  - "{{ development_requirements_file }}"
diff --git a/playbooks/roles/edxapp_common/defaults/main.yml b/playbooks/roles/edxapp_common/defaults/main.yml
new file mode 100644
index 00000000000..ec19c3573da
--- /dev/null
+++ b/playbooks/roles/edxapp_common/defaults/main.yml
@@ -0,0 +1,36 @@
+---
+# Base role for installing requirements common between edxapp and edxlocal
+
+edxapp_common_role_name: edxapp_common
+
+# OS packages
+edxapp_common_debian_pkgs_default:
+  - build-essential
+  - gfortran
+  # Needed for sandboxes for CS188x classes :-(
+  - graphviz
+  - graphviz-dev
+  - liblapack-dev
+  - libmysqlclient-dev
+  - libxml2-dev
+  # for shapely
+  - libgeos-dev
+  - libxslt1-dev
+  # for i18n
+  - gettext
+  # Pillow (PIL Fork) Dependencies
+  # Needed by the CMS to manipulate images.
+  - libjpeg8-dev
+  # python-saml dependencies: (required for Shibboleth in third_party_auth)
+  - libxmlsec1-dev
+  - swig
+
+edxapp_common_release_specific_debian_pkgs:
+  xenial:
+    - libpng12-dev
+  bionic:
+    - libpng-dev
+  focal:
+    - libpng-dev
+
+edxapp_common_debian_pkgs: "{{ edxapp_common_debian_pkgs_default + edxapp_common_release_specific_debian_pkgs[ansible_distribution_release] }}"
diff --git a/playbooks/roles/edxapp_common/tasks/main.yml b/playbooks/roles/edxapp_common/tasks/main.yml
new file mode 100644
index 00000000000..b8d0e537776
--- /dev/null
+++ b/playbooks/roles/edxapp_common/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: Install system packages
+  apt:
+    name: "{{ edxapp_common_debian_pkgs }}"
+    state: present
+    update_cache: yes
+  register: install_pkgs
+  until: install_pkgs is success
+  retries: 10
+  delay: 5
+  tags:
+    - install
+    - install:base
diff --git a/playbooks/roles/edxlocal/defaults/main.yml b/playbooks/roles/edxlocal/defaults/main.yml
index f75ce9a2624..8a2d2787ac8 100644
--- a/playbooks/roles/edxlocal/defaults/main.yml
+++ b/playbooks/roles/edxlocal/defaults/main.yml
@@ -1,5 +1,111 @@
 ---
 edxlocal_debian_pkgs:
-  - python-mysqldb
-  - mysql-server-5.5
   - postfix
+  - libjpeg-dev
+
+edxlocal_databases:
+  - "{{ ECOMMERCE_DATABASE_NAME | default(None) }}"
+  - "{{ INSIGHTS_DATABASE_NAME | default(None) }}"
+  - "{{ XQUEUE_MYSQL_DB_NAME | default(None) }}"
+  - "{{ EDXAPP_MYSQL_DB_NAME | default(None) }}"
+  - "{{ EDXAPP_MYSQL_CSMH_DB_NAME | default(None) }}"
+  - "{{ EDX_NOTES_API_MYSQL_DB_NAME | default(None) }}"
+  - "{{ ANALYTICS_API_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ ANALYTICS_API_REPORTS_DB_NAME | default(None) }}"
+  - "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ DISCOVERY_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ REGISTRAR_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ LICENSE_MANAGER_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ ENTERPRISE_CATALOG_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ COMMERCE_COORDINATOR_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ EDX_EXAMS_DEFAULT_DB_NAME | default(None) }}"
+  - "{{ SUBSCRIPTIONS_DEFAULT_DB_NAME | default(None) }}"
+
+edxlocal_database_users:
+  - {
+      db: "{{ ECOMMERCE_DATABASE_NAME | default(None) }}",
+      user: "{{ ECOMMERCE_DATABASE_USER | default(None) }}",
+      pass: "{{ ECOMMERCE_DATABASE_PASSWORD | default(None) }}"
+    }
+  - {
+      db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}",
+      user: "{{ INSIGHTS_DATABASE_USER | default(None) }}",
+      pass: "{{
INSIGHTS_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ XQUEUE_MYSQL_DB_NAME | default(None) }}", + user: "{{ XQUEUE_MYSQL_USER | default(None) }}", + pass: "{{ XQUEUE_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ EDXAPP_MYSQL_DB_NAME | default(None) }}", + user: "{{ EDXAPP_MYSQL_USER | default(None) }}", + pass: "{{ EDXAPP_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ EDXAPP_MYSQL_CSMH_DB_NAME | default(None) }}", + user: "{{ EDXAPP_MYSQL_CSMH_USER | default(None) }}", + pass: "{{ EDXAPP_MYSQL_CSMH_PASSWORD | default(None) }}" + } + - { + db: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME | default(None) }}", + user: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER | default(None) }}", + pass: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ HIVE_METASTORE_DATABASE_NAME | default(None) }}", + user: "{{ HIVE_METASTORE_DATABASE_USER | default(None) }}", + pass: "{{ HIVE_METASTORE_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}", + user: "{{ BLOCKSTORE_DATABASE_USER | default(None) }}", + pass: "{{ BLOCKSTORE_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}", + user: "{{ CREDENTIALS_MYSQL_USER | default(None) }}", + pass: "{{ CREDENTIALS_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ DISCOVERY_DEFAULT_DB_NAME | default(None) }}", + user: "{{ DISCOVERY_MYSQL_USER | default(None) }}", + pass: "{{ DISCOVERY_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME | default(None) }}", + user: "{{ VEDA_WEB_FRONTEND_MYSQL_USER | default(None) }}", + pass: "{{ VEDA_WEB_FRONTEND_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ REGISTRAR_DEFAULT_DB_NAME | default(None) }}", + user: "{{ REGISTRAR_MYSQL_USER | default(None) }}", + pass: "{{ REGISTRAR_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ LICENSE_MANAGER_DEFAULT_DB_NAME | default(None) }}", + user: "{{ LICENSE_MANAGER_MYSQL_USER | default(None) }}", + pass: "{{ LICENSE_MANAGER_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ ENTERPRISE_CATALOG_DEFAULT_DB_NAME | default(None) }}", + user: "{{ ENTERPRISE_CATALOG_MYSQL_USER | default(None) }}", + pass: "{{ ENTERPRISE_CATALOG_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ COMMERCE_COORDINATOR_DEFAULT_DB_NAME | default(None) }}", + user: "{{ COMMERCE_COORDINATOR_MYSQL_USER | default(None) }}", + pass: "{{ COMMERCE_COORDINATOR_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ EDX_EXAMS_DEFAULT_DB_NAME | default(None) }}", + user: "{{ EDX_EXAMS_MYSQL_USER | default(None) }}", + pass: "{{ EDX_EXAMS_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ SUBSCRIPTIONS_DEFAULT_DB_NAME | default(None) }}", + user: "{{ SUBSCRIPTIONS_MYSQL_USER | default(None) }}", + pass: "{{ SUBSCRIPTIONS_MYSQL_PASSWORD | default(None) }}" + } diff --git a/playbooks/roles/edxlocal/meta/main.yml b/playbooks/roles/edxlocal/meta/main.yml index 2083f0e1251..28424905b9f 100644 --- a/playbooks/roles/edxlocal/meta/main.yml +++ b/playbooks/roles/edxlocal/meta/main.yml @@ -1,3 +1,4 @@ --- dependencies: - common + - mysql diff --git a/playbooks/roles/edxlocal/tasks/main.yml b/playbooks/roles/edxlocal/tasks/main.yml index a67ee996e23..184faea2a1f 100644 --- a/playbooks/roles/edxlocal/tasks/main.yml +++ b/playbooks/roles/edxlocal/tasks/main.yml @@ -1,73 +1,86 @@ # Installs packages to run edx locally on a single instance -# requires: -# - group_vars/all -# - common/tasks/main.yml -# -# This 
installs mysql-server-5.5 though -# in production we use mysql-5.1.62. -# -# We could install from source instead: -# http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz # --- -- name: install packages needed for single server - apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present +- name: Install packages needed for single server + apt: + name: "{{ item }}" + install_recommends: yes + state: present + with_items: "{{ edxlocal_debian_pkgs }}" -- name: setup the edxapp db user - mysql_user: > - name={{ EDXAPP_MYSQL_USER }} - password={{ EDXAPP_MYSQL_PASSWORD }} - priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL' +# TODO: Add a test to make sure mysql is running. -- name: create a database for edxapp - mysql_db: > - db=edxapp - state=present - encoding=utf8 - when: EDXAPP_MYSQL_USER is defined - -- name: setup the xqueue db user - mysql_user: > - name={{ XQUEUE_MYSQL_USER }} - password={{ XQUEUE_MYSQL_PASSWORD }} - priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL' - when: XQUEUE_MYSQL_USER is defined and not devstack +- name: create databases + mysql_db: + db: "{{ item }}" + state: present + encoding: utf8 + login_unix_socket: "{{ mysql_socket }}" + when: item != None and item != '' + with_items: "{{ edxlocal_databases }}" -- name: create a database for xqueue - mysql_db: > - db=xqueue - state=present - encoding=utf8 - when: XQUEUE_MYSQL_USER is defined and not devstack +- name: create database users + mysql_user: + name: "{{ item.user }}" + password: "{{ item.pass }}" + priv: "{{ item.db }}.*:ALL" + append_privs: yes + login_unix_socket: "{{ mysql_socket }}" + when: item.db != None and item.db != '' + with_items: "{{ edxlocal_database_users }}" -- name: setup the ora db user - mysql_user: > - name={{ ORA_MYSQL_USER }} - password={{ ORA_MYSQL_PASSWORD }} - priv='{{ORA_MYSQL_DB_NAME}}.*:ALL' +- name: setup the migration db user + mysql_user: + name: "{{ COMMON_MYSQL_MIGRATE_USER }}" + password: "{{ COMMON_MYSQL_MIGRATE_PASS }}" + priv: "{{ item }}.*:ALL" + append_privs: yes + login_unix_socket: "{{ mysql_socket }}" + when: item != None and item != '' + with_items: "{{ edxlocal_databases }}" -- name: create a database for ora - mysql_db: > - db=ora - state=present - encoding=utf8 - when: ORA_MYSQL_USER is defined +- name: create api user for the analytics api + mysql_user: + name: "{{ ANALYTICS_API_DATABASES.default.USER }}" + password: "{{ ANALYTICS_API_DATABASES.default.PASSWORD }}" + priv: '{{ ANALYTICS_API_DATABASES.default.NAME }}.*:ALL/reports.*:SELECT' + login_unix_socket: "{{ mysql_socket }}" + when: ANALYTICS_API_DATABASES is defined and ANALYTICS_API_DATABASES.default is defined -- name: setup the discern db user - mysql_user: > - name={{ DISCERN_MYSQL_USER }} - password={{ DISCERN_MYSQL_PASSWORD }} - priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL' - when: DISCERN_MYSQL_USER is defined and not devstack +- name: create read-only reports user for the analytics-api + mysql_user: + name: "{{ ANALYTICS_API_DATABASES.reports.USER }}" + password: "{{ ANALYTICS_API_DATABASES.reports.PASSWORD }}" + priv: '{{ ANALYTICS_API_DATABASES.reports.NAME }}.*:SELECT' + login_unix_socket: "{{ mysql_socket }}" + when: ANALYTICS_API_DATABASES is defined and ANALYTICS_API_DATABASES.reports is defined +- name: create a database for the hive metastore + mysql_db: + db: "{{ HIVE_METASTORE_DATABASE.name }}" + state: "present" + encoding: "latin1" + login_unix_socket: "{{ mysql_socket }}" + when: HIVE_METASTORE_DATABASE is defined -- name: create a database for discern - mysql_db: > - db=discern - 
state=present - encoding=utf8 - when: DISCERN_MYSQL_USER is defined and not devstack +- name: setup the edx-notes-api db user + mysql_user: + name: "{{ EDX_NOTES_API_MYSQL_DB_USER }}" + password: "{{ EDX_NOTES_API_MYSQL_DB_PASS }}" + priv: "{{ EDX_NOTES_API_MYSQL_DB_NAME }}.*:SELECT,INSERT,UPDATE,DELETE" + login_unix_socket: "{{ mysql_socket }}" + when: EDX_NOTES_API_MYSQL_DB_USER is defined +- name: setup the read-only db user + mysql_user: + name: "{{ COMMON_MYSQL_READ_ONLY_USER }}" + password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}" + login_unix_socket: "{{ mysql_socket }}" + priv: "*.*:SELECT" -- name: install memcached - apt: pkg=memcached state=present +- name: setup the admin db user + mysql_user: + name: "{{ COMMON_MYSQL_ADMIN_USER }}" + password: "{{ COMMON_MYSQL_ADMIN_PASS }}" + priv: "*.*:CREATE USER" + login_unix_socket: "{{ mysql_socket }}" diff --git a/playbooks/roles/elasticsearch/defaults/main.yml b/playbooks/roles/elasticsearch/defaults/main.yml index 72ca3d40be5..42e887323d8 100644 --- a/playbooks/roles/elasticsearch/defaults/main.yml +++ b/playbooks/roles/elasticsearch/defaults/main.yml @@ -3,14 +3,15 @@ elasticsearch_app_dir: "{{ COMMON_APP_DIR }}/elasticsearch" elasticsearch_data_dir: "{{ COMMON_DATA_DIR }}/elasticsearch" elasticsearch_log_dir: "{{ COMMON_LOG_DIR }}/elasticsearch" elasticsearch_cfg_dir: "{{ COMMON_CFG_DIR }}/elasticsearch" -elasticsearch_version: "0.90.11" -elasticsearch_sha: "8e81388d0ba7e427b42514d96e25ba6499024c24" -elasticsearch_file: "elasticsearch-{{ elasticsearch_version }}.deb" -elasticsearch_url: "https://download.elasticsearch.org/elasticsearch/elasticsearch/%7B%7B%20elasticsearch_file%20%7D%7D" +elasticsearch_apt_key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch" +elasticsearch_repo: "deb [trusted=yes] https://artifacts.elastic.co/packages/7.x/apt stable main" +elasticsearch_package_name: "elasticsearch" elasticsearch_user: "elasticsearch" elasticsearch_group: "elasticsearch" # # Defaults for a single server installation. -ELASTICSEARCH_CLUSTERED: false +ELASTICSEARCH_CLUSTER_MEMBERS: [] ELASTICSEARCH_HEAP_SIZE: "512m" +ELASTICSEARCH_START_TIMEOUT: "600" +ELASTICSEARCH_VERSION: "7.10.2" diff --git a/playbooks/roles/elasticsearch/meta/main.yml b/playbooks/roles/elasticsearch/meta/main.yml new file mode 100644 index 00000000000..2083f0e1251 --- /dev/null +++ b/playbooks/roles/elasticsearch/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/playbooks/roles/elasticsearch/tasks/main.yml b/playbooks/roles/elasticsearch/tasks/main.yml index 5594e7a892b..a07591d7451 100644 --- a/playbooks/roles/elasticsearch/tasks/main.yml +++ b/playbooks/roles/elasticsearch/tasks/main.yml @@ -1,14 +1,13 @@ --- # elasticsearch -# +# # Dependencies: # # * common -# * oraclejdk -# +# # Example play: -# +# # This role can be used to do a single-server or clustered # installation of the elasticsearch service. 
When a cluster # is being installed, there are two important things that @@ -25,62 +24,117 @@ # - hosts: tag_role_elasticsearch:&tag_environment_stage # roles: # - common -# - oraclejdk # - elasticsearch # -- name: download elasticsearch - get_url: > - url={{ elasticsearch_url }} - dest=/var/tmp/{{ elasticsearch_file }} - force=no +- name: Install Elasticsearch repo key + apt_key: + url: "{{ elasticsearch_apt_key_url }}" + state: present + tags: + - install + - install:base + +- name: Add Elasticsearch Repo + apt_repository: + repo: "{{ elasticsearch_repo }}" + state: present + tags: + - install + - install:base + +- name: install elasticsearch + apt: + pkg: "{{ elasticsearch_package_name }}={{ ELASTICSEARCH_VERSION }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + tags: + - install + - install:base register: elasticsearch_reinstall -- name: install elasticsearch from local package - shell: > - dpkg -i --force-confold /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb - executable=/bin/bash - when: elasticsearch_reinstall.changed + # Prevent elasticsearch from being upgraded. +- dpkg_selections: + name: elasticsearch + selection: hold - name: create directories - file: > - path="{{ item }}" - state=directory - owner="{{ elasticsearch_user }}" - group="{{ elasticsearch_group }}" + file: + path: "{{ item }}" + state: directory + owner: "{{ elasticsearch_user }}" + group: "{{ elasticsearch_group }}" with_items: - "{{ elasticsearch_data_dir }}" - "{{ elasticsearch_log_dir }}" - "{{ elasticsearch_cfg_dir }}" + tags: + - install + - install:base - name: update elasticsearch defaults - template: > - src=etc/default/elasticsearch.j2 dest=/etc/default/elasticsearch - when: ELASTICSEARCH_CLUSTERED - + template: + src: etc/default/elasticsearch.j2 + dest: /etc/default/elasticsearch + tags: + - install + - install:configuration + - name: drop the elasticsearch config - template: > - src=edx/etc/elasticsearch/elasticsearch.yml.j2 dest={{ elasticsearch_cfg_dir }}/elasticsearch.yml - mode=0744 - when: ELASTICSEARCH_CLUSTERED + template: + src: edx/etc/elasticsearch/elasticsearch.yml.j2 + dest: "{{ elasticsearch_cfg_dir }}/elasticsearch.yml" + mode: 0644 + tags: + - install + - install:configuration - name: drop the elasticsearch logging config - template: > - src=edx/etc/elasticsearch/logging.yml.j2 dest={{ elasticsearch_cfg_dir }}/logging.yml - mode=0744 - when: ELASTICSEARCH_CLUSTERED - - # Plugin installation fails hard when the plugin already - # exists. This is problematic if this is upgraded. 
+ copy: + src: /etc/elasticsearch/log4j2.properties + dest: "{{ elasticsearch_cfg_dir }}/log4j2.properties" + remote_src: yes + mode: 0644 + tags: + - install + - install:configuration + +- name: drop the elasticsearch jvm config + template: + src: edx/etc/elasticsearch/jvm.options.j2 + dest: "{{ elasticsearch_cfg_dir }}/jvm.options" + mode: 0644 + tags: + - install + - install:configuration + +- name: drop the elasticsearch systemd service config + template: + src: lib/systemd/system/elasticsearch.service.j2 + dest: "/lib/systemd/system/elasticsearch.service" + mode: 0644 + tags: + - install + - install:configuration -- name: check if the bigdesk plugin is installed - stat: path=/usr/share/elasticsearch/plugins/bigdesk - register: bigdesk - -- name: install bigdesk plugin - shell: > - /usr/share/elasticsearch/bin/plugin -install lukas-vlcek/bigdesk/2.2.0 - when: bigdesk.stat.isdir is not defined - - name: Ensure elasticsearch is enabled and started - service: name=elasticsearch state=restarted enabled=yes \ No newline at end of file + service: + name: elasticsearch + state: started + enabled: yes + tags: + - manage + - manage:start + +- name: Restart elastic when there has been an upgrade + service: + name: elasticsearch + state: restarted + enabled: yes + when: elasticsearch_reinstall.changed + tags: + - manage + - manage:restart + - install diff --git a/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/elasticsearch.yml.j2 b/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/elasticsearch.yml.j2 index 56091346232..82b7d007ccb 100644 --- a/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/elasticsearch.yml.j2 +++ b/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/elasticsearch.yml.j2 @@ -1,40 +1,96 @@ # {{ ansible_managed }} - -# Path to directory where to store index data allocated for this node. +# ======================== Elasticsearch Configuration ========================= +# +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. +# +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. +# +# Please consult the documentation for further information on configuration options: +# https://www.elastic.co/guide/en/elasticsearch/reference/index.html +# +# ---------------------------------- Cluster ----------------------------------- +# +# Use a descriptive name for your cluster: +# +#cluster.name: my-application +# +# ------------------------------------ Node ------------------------------------ +# +# Use a descriptive name for the node: +# +#node.name: node-1 +# +# Add custom attributes to the node: +# +#node.attr.rack: r1 +# +# ----------------------------------- Paths ------------------------------------ +# +# Path to directory where to store the data (separate multiple locations by comma): +# +path.data: {{ elasticsearch_data_dir }} # -path.data: {{elasticsearch_data_dir}} - # Path to log files: # -path.logs: {{elasticsearch_log_dir}} - -# ElasticSearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. 
+path.logs: {{ elasticsearch_log_dir }} # -# Set this property to true to lock the memory: +# ----------------------------------- Memory ----------------------------------- # -bootstrap.mlockall: true - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. +# Lock the memory on startup: # -# 1. Disable multicast discovery (enabled by default): +#bootstrap.memory_lock: true # -# discovery.zen.ping.multicast.enabled: false +# Make sure that the heap size is set to about half the memory available +# on the system and that the owner of the process is allowed to use this +# limit. # -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: +# Elasticsearch performs poorly when the system is swapping the memory. # -# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"] -{%- if ELASTICSEARCH_CLUSTERED -%} - {%- set hosts= [] -%} - - {%- for host in hostvars.keys() -%} - {% do hosts.append(host) %} - {%- endfor %} - -discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}'] - -{% endif -%} \ No newline at end of file +# ---------------------------------- Network ----------------------------------- +# +# Set the bind address to a specific IP (IPv4 or IPv6): +# +#network.host: 192.168.0.1 +# +# Set a custom port for HTTP: +# +#http.port: 9200 +# +{% if vagrant_cluster|bool %} +network.host: {{ ansible_ssh_host }} +{% endif %} +# For more information, consult the network module documentation. +# +# --------------------------------- Discovery ---------------------------------- +# +# Pass an initial list of hosts to perform discovery when this node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] +# +#discovery.seed_hosts: ["host1", "host2"] +{% if ELASTICSEARCH_CLUSTER_MEMBERS|length > 1 -%} +discovery.seed_hosts: ['{{ELASTICSEARCH_CLUSTER_MEMBERS|join("\',\'") }}'] +{% endif -%} +# +# Bootstrap the cluster using an initial set of master-eligible nodes: +# +#cluster.initial_master_nodes: ["node-1", "node-2"] +# +# For more information, consult the discovery and cluster formation module documentation. +# +# ---------------------------------- Gateway ----------------------------------- +# +# Block initial recovery after a full cluster restart until N nodes are started: +# +#gateway.recover_after_nodes: 3 +# +# For more information, consult the gateway module documentation. +# +# ---------------------------------- Various ----------------------------------- +# +# Require explicit names when deleting indices: +# +#action.destructive_requires_name: true diff --git a/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/jvm.options.j2 b/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/jvm.options.j2 new file mode 100644 index 00000000000..09c07afb7a2 --- /dev/null +++ b/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/jvm.options.j2 @@ -0,0 +1,79 @@ +# {{ ansible_managed }} + +## JVM configuration + +################################################################ +## IMPORTANT: JVM heap size +################################################################ +## +## You should always set the min and max JVM heap +## size to the same value. 
For example, to set +## the heap to 4 GB, set: +## +## -Xms4g +## -Xmx4g +## +## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +## for more information +## +################################################################ + +# Xms represents the initial size of total heap space +# Xmx represents the maximum size of total heap space + +-Xms{{ ELASTICSEARCH_HEAP_SIZE }} +-Xmx{{ ELASTICSEARCH_HEAP_SIZE }} + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC configuration +8-13:-XX:+UseConcMarkSweepGC +8-13:-XX:CMSInitiatingOccupancyFraction=75 +8-13:-XX:+UseCMSInitiatingOccupancyOnly + +## G1GC Configuration +# NOTE: G1 GC is only supported on JDK version 10 or later +# to use G1GC, uncomment the next two lines and update the version on the +# following three lines to your version of the JDK +# 10-13:-XX:-UseConcMarkSweepGC +# 10-13:-XX:-UseCMSInitiatingOccupancyOnly +14-:-XX:+UseG1GC +14-:-XX:G1ReservePercent=25 +14-:-XX:InitiatingHeapOccupancyPercent=30 + +## JVM temporary directory +-Djava.io.tmpdir=${ES_TMPDIR} + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps; ensure the directory exists and +# has sufficient space +-XX:HeapDumpPath=/var/lib/elasticsearch + +# specify an alternative path for JVM fatal error logs +-XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log + +## JDK 8 GC logging +8:-XX:+PrintGCDetails +8:-XX:+PrintGCDateStamps +8:-XX:+PrintTenuringDistribution +8:-XX:+PrintGCApplicationStoppedTime +8:-Xloggc:/var/log/elasticsearch/gc.log +8:-XX:+UseGCLogFileRotation +8:-XX:NumberOfGCLogFiles=32 +8:-XX:GCLogFileSize=64m + +# JDK 9+ GC logging +9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m diff --git a/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/logging.yml.j2 b/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/logging.yml.j2 deleted file mode 100644 index 6978a6fdc2e..00000000000 --- a/playbooks/roles/elasticsearch/templates/edx/etc/elasticsearch/logging.yml.j2 +++ /dev/null @@ -1,56 +0,0 @@ -# you can override this using by setting a system property, for example -Des.logger.level=DEBUG -es.logger.level: INFO -rootLogger: ${es.logger.level}, console, file -logger: - # log action execution errors for easier debugging - action: DEBUG - # reduce the logging for aws, too much is logged under the default INFO - com.amazonaws: WARN - - # gateway - #gateway: DEBUG - #index.gateway: DEBUG - - # peer shard recovery - #indices.recovery: DEBUG - - # discovery - #discovery: TRACE - - index.search.slowlog: TRACE, index_search_slow_log_file - index.indexing.slowlog: TRACE, index_indexing_slow_log_file - -additivity: - index.search.slowlog: false - index.indexing.slowlog: false - -appender: - console: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: 
"[%d{ISO8601}][%-5p][%-25c] %m%n" - - index_search_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_search_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - index_indexing_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file diff --git a/playbooks/roles/elasticsearch/templates/etc/default/elasticsearch.j2 b/playbooks/roles/elasticsearch/templates/etc/default/elasticsearch.j2 index d26cbe926b8..f577bab3b34 100644 --- a/playbooks/roles/elasticsearch/templates/etc/default/elasticsearch.j2 +++ b/playbooks/roles/elasticsearch/templates/etc/default/elasticsearch.j2 @@ -1,41 +1,54 @@ # {{ ansible_managed }} +################################ +# Elasticsearch +################################ -# Run ElasticSearch as this user ID and group ID -#ES_USER=elasticsearch -#ES_GROUP=elasticsearch +# Elasticsearch home directory +#ES_HOME=/usr/share/elasticsearch -# Heap Size (defaults to 256m min, 1g max) -ES_HEAP_SIZE={{ ELASTICSEARCH_HEAP_SIZE }} +# Elasticsearch Java path +#JAVA_HOME= -# Heap new generation -#ES_HEAP_NEWSIZE= +# Elasticsearch configuration directory +# Note: this setting will be shared with command-line tools +ES_PATH_CONF={{ elasticsearch_cfg_dir }} -# max direct memory -#ES_DIRECT_SIZE= +# Elasticsearch PID directory +#PID_DIR=/var/run/elasticsearch -# Maximum number of open files, defaults to 65535. -#MAX_OPEN_FILES=65535 +# Additional Java OPTS +#ES_JAVA_OPTS= -# Maximum locked memory size. Set to "unlimited" if you use the -# bootstrap.mlockall option in elasticsearch.yml. You must also set -# ES_HEAP_SIZE. -#MAX_LOCKED_MEMORY=unlimited +# Configure restart on package upgrade (true, every other setting will lead to not restarting) +#RESTART_ON_UPGRADE=true -# ElasticSearch log directory -LOG_DIR={{ elasticsearch_log_dir }} +################################ +# Elasticsearch service +################################ -# ElasticSearch data directory -DATA_DIR={{ COMMON_DATA_DIR }} +# SysV init.d +# +# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process +ES_STARTUP_SLEEP_TIME=5 -# ElasticSearch work directory -#WORK_DIR=/tmp/elasticsearch +################################ +# System properties +################################ -# ElasticSearch configuration directory -CONF_DIR={{ elasticsearch_cfg_dir }} +# Specifies the maximum file descriptor number that can be opened by this process +# When using Systemd, this setting is ignored and the LimitNOFILE defined in +# /usr/lib/systemd/system/elasticsearch.service takes precedence +#MAX_OPEN_FILES=65535 -# ElasticSearch configuration file (elasticsearch.yml) -CONF_FILE=${CONF_DIR}/elasticsearch.yml +# The maximum number of bytes of memory that may be locked into RAM +# Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option +# in elasticsearch.yml. +# When using systemd, LimitMEMLOCK must be set in a unit file such as +# /etc/systemd/system/elasticsearch.service.d/override.conf. 
+#MAX_LOCKED_MEMORY=unlimited -# Additional Java OPTS -#ES_JAVA_OPTS= +# Maximum number of VMA (Virtual Memory Areas) a process can own +# When using Systemd, this setting is ignored and the 'vm.max_map_count' +# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf +#MAX_MAP_COUNT=262144 diff --git a/playbooks/roles/elasticsearch/templates/lib/systemd/system/elasticsearch.service.j2 b/playbooks/roles/elasticsearch/templates/lib/systemd/system/elasticsearch.service.j2 new file mode 100644 index 00000000000..94165ee6204 --- /dev/null +++ b/playbooks/roles/elasticsearch/templates/lib/systemd/system/elasticsearch.service.j2 @@ -0,0 +1,66 @@ +[Unit] +Description=Elasticsearch +Documentation=https://www.elastic.co +Wants=network-online.target +After=network-online.target + +[Service] +Type=notify +RuntimeDirectory=elasticsearch +PrivateTmp=true +Environment=ES_HOME=/usr/share/elasticsearch +Environment=ES_PATH_CONF=/etc/elasticsearch +Environment=PID_DIR=/var/run/elasticsearch +Environment=ES_SD_NOTIFY=true +EnvironmentFile=-/etc/default/elasticsearch + +WorkingDirectory=/usr/share/elasticsearch + +User={{ elasticsearch_user }} +Group={{ elasticsearch_group }} + +ExecStart=/usr/share/elasticsearch/bin/systemd-entrypoint -p ${PID_DIR}/elasticsearch.pid --quiet + +# StandardOutput is configured to redirect to journalctl since +# some error messages may be logged in standard output before +# elasticsearch logging system is initialized. Elasticsearch +# stores its logs in /var/log/elasticsearch and does not use +# journalctl by default. If you also want to enable journalctl +# logging, you can simply remove the "quiet" option from ExecStart. +StandardOutput=journal +StandardError=inherit + +# Specifies the maximum file descriptor number that can be opened by this process +LimitNOFILE=65535 + +# Specifies the maximum number of processes +LimitNPROC=4096 + +# Specifies the maximum size of virtual memory +LimitAS=infinity + +# Specifies the maximum file size +LimitFSIZE=infinity + +# Disable timeout logic and wait until process is stopped +TimeoutStopSec=0 + +# SIGTERM signal is used to stop the Java process +KillSignal=SIGTERM + +# Send the signal only to the JVM rather than its control group +KillMode=process + +# Java process is never killed +SendSIGKILL=no + +# When a JVM receives a SIGTERM signal it exits with code 143 +SuccessExitStatus=143 + +# Allow a slow startup before the systemd notifier module kicks in to extend the timeout +TimeoutStartSec={{ ELASTICSEARCH_START_TIMEOUT }} + +[Install] +WantedBy=multi-user.target + +# Built for packages-7.10.2 (packages) diff --git a/playbooks/roles/enhanced_networking/defaults/main.yml b/playbooks/roles/enhanced_networking/defaults/main.yml new file mode 100644 index 00000000000..00aafe5096a --- /dev/null +++ b/playbooks/roles/enhanced_networking/defaults/main.yml @@ -0,0 +1,2 @@ +profile: edx +compatible_instance_types: ['c3', 'c4', 'd2', 'i2', 'm4', 'r3'] \ No newline at end of file diff --git a/playbooks/roles/enhanced_networking/tasks/main.yml b/playbooks/roles/enhanced_networking/tasks/main.yml new file mode 100644 index 00000000000..ceee6f54ce4 --- /dev/null +++ b/playbooks/roles/enhanced_networking/tasks/main.yml @@ -0,0 +1,71 @@ +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT 
+#
+#
+#
+# Tasks for role enhanced_networking
+#
+# Overview:
+#
+# This role ensures that enhanced networking
+# (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html)
+# is turned on on the AWS EC2 instances to which it is applied.
+#
+# Note that turning on enhanced networking requires this role to restart the instances
+# where it turns it on. Instances where enhanced networking is already on or where
+# enhanced networking is not supported for their instance type will NOT be restarted.
+#
+# Uses local actions so multiple actions can be done in parallel.
+#
+- name: Get ec2 facts
+  ec2_metadata_facts:
+
+- name: Test for enhanced networking
+  local_action:
+    module: shell aws ec2 describe-instance-attribute --instance-id {{ ansible_ec2_instance_id }} --attribute sriovNetSupport
+  changed_when: False
+  become: False
+  register: enhanced_networking_raw
+
+- name: Test for kernel module
+  shell: 'modinfo ixgbevf | grep -E ^version: | sed -E "s/^version: *//"'
+  register: ixgbevf_kernel_module
+  ignore_errors: yes
+
+- set_fact:
+    # AWS would like 2.16 or 2.14 of this module, but says that Ubuntu's default of 2.11.3-k is ok
+    has_ixgbevf_kernel_module: "{{ (ixgbevf_kernel_module.stdout | default('0.0.0')) | replace('-k','') | version_compare('2.11.3','>=', strict=True) }}"
+    supports_enhanced_networking: "{{ (ansible_ec2_instance_type[:2] | lower) in compatible_instance_types }}"
+    enhanced_networking_already_on: "{{ (enhanced_networking_raw.stdout | from_json).SriovNetSupport.Value | default(None) == 'simple' }}"
+
+- name: Shut down instances
+  local_action:
+    module: ec2
+    instance_ids: "{{ ansible_ec2_instance_id }}"
+    state: stopped
+    region: "{{ ansible_ec2_placement_region }}"
+    profile: "{{ profile }}"
+    wait: yes
+  become: False
+  when: supports_enhanced_networking and has_ixgbevf_kernel_module and not enhanced_networking_already_on
+
+- name: Set enhanced networking instance attribute
+  local_action:
+    module: shell aws ec2 modify-instance-attribute --instance-id {{ ansible_ec2_instance_id }} --sriov-net-support simple
+  when: supports_enhanced_networking and has_ixgbevf_kernel_module and not enhanced_networking_already_on
+
+- name: Start instances
+  local_action:
+    module: ec2
+    instance_ids: "{{ ansible_ec2_instance_id }}"
+    state: running
+    region: "{{ ansible_ec2_placement_region }}"
+    profile: "{{ profile }}"
+    wait: yes
+  become: False
+  when: supports_enhanced_networking and has_ixgbevf_kernel_module and not enhanced_networking_already_on
diff --git a/playbooks/roles/enterprise_catalog/defaults/main.yml b/playbooks/roles/enterprise_catalog/defaults/main.yml
new file mode 100644
index 00000000000..939a79ff96b
--- /dev/null
+++ b/playbooks/roles/enterprise_catalog/defaults/main.yml
@@ -0,0 +1,167 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Defaults for role enterprise_catalog
+#
+
+
+#
+# vars are namespaced with the module name.
+#
+enterprise_catalog_service_name: 'enterprise_catalog'
+
+enterprise_catalog_user: "{{ enterprise_catalog_service_name }}"
+enterprise_catalog_home: "{{ COMMON_APP_DIR }}/{{ enterprise_catalog_service_name }}"
+enterprise_catalog_app_dir: "{{ COMMON_APP_DIR }}/{{ enterprise_catalog_service_name }}"
+enterprise_catalog_code_dir: "{{ enterprise_catalog_app_dir }}/{{ enterprise_catalog_service_name }}"
+enterprise_catalog_venvs_dir: "{{ enterprise_catalog_app_dir }}/venvs"
+enterprise_catalog_venv_dir: "{{ enterprise_catalog_venvs_dir }}/enterprise_catalog"
+
+enterprise_catalog_celery_default_queue: 'enterprise_catalog.default'
+
+enterprise_catalog_hostname: 'enterprise-catalog'
+
+ENTERPRISE_CATALOG_CELERY_ALWAYS_EAGER: false
+ENTERPRISE_CATALOG_CELERY_BROKER_TRANSPORT: ''
+ENTERPRISE_CATALOG_CELERY_BROKER_USER: ''
+ENTERPRISE_CATALOG_CELERY_BROKER_PASSWORD: ''
+ENTERPRISE_CATALOG_CELERY_BROKER_HOSTNAME: ''
+ENTERPRISE_CATALOG_CELERY_BROKER_VHOST: ''
+
+ENTERPRISE_CATALOG_USE_PYTHON38: True
+
+enterprise_catalog_environment:
+  ENTERPRISE_CATALOG_CFG: '{{ COMMON_CFG_DIR }}/{{ enterprise_catalog_service_name }}.yml'
+
+enterprise_catalog_gunicorn_port: 8160
+
+enterprise_catalog_debian_pkgs: []
+
+ENTERPRISE_CATALOG_ENABLE_EXPERIMENTAL_DOCKER_SHIM: false
+
+ENTERPRISE_CATALOG_NGINX_PORT: '1{{ enterprise_catalog_gunicorn_port }}'
+ENTERPRISE_CATALOG_SSL_NGINX_PORT: '4{{ enterprise_catalog_gunicorn_port }}'
+
+ENTERPRISE_CATALOG_DEFAULT_DB_NAME: 'enterprise_catalog'
+ENTERPRISE_CATALOG_MYSQL_HOST: 'localhost'
+# MySQL usernames are limited to 16 characters
+ENTERPRISE_CATALOG_MYSQL_USER: 'entcatalog001'
+ENTERPRISE_CATALOG_MYSQL_PASSWORD: 'password'
+
+ENTERPRISE_CATALOG_MEMCACHE: [ 'memcache' ]
+
+ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE: 'enterprise_catalog.settings.production'
+ENTERPRISE_CATALOG_DOMAIN: 'localhost'
+ENTERPRISE_CATALOG_URL_ROOT: 'http://{{ ENTERPRISE_CATALOG_DOMAIN }}:{{ ENTERPRISE_CATALOG_NGINX_PORT }}'
+ENTERPRISE_CATALOG_LOGOUT_URL: '{{ ENTERPRISE_CATALOG_URL_ROOT }}/logout/'
+
+ENTERPRISE_CATALOG_LANGUAGE_CODE: 'en'
+ENTERPRISE_CATALOG_LANGUAGE_COOKIE_NAME: 'openedx-language-preference'
+
+ENTERPRISE_CATALOG_SERVICE_USER: 'enterprise_catalog_service_user'
+
+ENTERPRISE_CATALOG_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ enterprise_catalog_service_name }}'
+ENTERPRISE_CATALOG_MEDIA_ROOT: '{{ ENTERPRISE_CATALOG_DATA_DIR }}/media'
+ENTERPRISE_CATALOG_MEDIA_URL: '/media/'
+
+ENTERPRISE_CATALOG_MEDIA_STORAGE_BACKEND:
+  DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage'
+  MEDIA_ROOT: '{{ ENTERPRISE_CATALOG_MEDIA_ROOT }}'
+  MEDIA_URL: '{{ ENTERPRISE_CATALOG_MEDIA_URL }}'
+
+# TODO: Let edx_django_service manage ENTERPRISE_CATALOG_STATIC_ROOT in phase 2.
+ENTERPRISE_CATALOG_STATIC_ROOT: '{{ ENTERPRISE_CATALOG_DATA_DIR }}/staticfiles'
+ENTERPRISE_CATALOG_STATIC_URL: '/static/'
+
+ENTERPRISE_CATALOG_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage'
+
+ENTERPRISE_CATALOG_CORS_ORIGIN_ALLOW_ALL: false
+ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_DEFAULT:
+  - '{{ ENTERPRISE_CATALOG_DOMAIN }}'
+
+ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_EXTRA: []
+ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST: '{{ ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_DEFAULT + ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_EXTRA }}'
+
+ENTERPRISE_CATALOG_VERSION: 'master'
+
+ENTERPRISE_CATALOG_GUNICORN_EXTRA: ''
+
+ENTERPRISE_CATALOG_EXTRA_APPS: []
+
+ENTERPRISE_CATALOG_SESSION_EXPIRE_AT_BROWSER_CLOSE: false
+
+ENTERPRISE_CATALOG_CERTIFICATE_LANGUAGES:
+  'en': 'English'
+  'es_419': 'Spanish'
+
+enterprise_catalog_service_config_overrides:
+  CERTIFICATE_LANGUAGES: '{{ ENTERPRISE_CATALOG_CERTIFICATE_LANGUAGES }}'
+  ENTERPRISE_CATALOG_SERVICE_USER: '{{ ENTERPRISE_CATALOG_SERVICE_USER }}'
+  LANGUAGE_COOKIE_NAME: '{{ ENTERPRISE_CATALOG_LANGUAGE_COOKIE_NAME }}'
+  CSRF_COOKIE_SECURE: "{{ ENTERPRISE_CATALOG_CSRF_COOKIE_SECURE }}"
+  CELERY_ALWAYS_EAGER: '{{ ENTERPRISE_CATALOG_CELERY_ALWAYS_EAGER }}'
+  CELERY_BROKER_TRANSPORT: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_TRANSPORT }}'
+  CELERY_BROKER_USER: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_USER }}'
+  CELERY_BROKER_PASSWORD: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_PASSWORD }}'
+  CELERY_BROKER_HOSTNAME: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_HOSTNAME }}'
+  CELERY_BROKER_VHOST: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_VHOST }}'
+  CELERY_DEFAULT_EXCHANGE: 'enterprise_catalog'
+  CELERY_DEFAULT_ROUTING_KEY: 'enterprise_catalog'
+  CELERY_DEFAULT_QUEUE: '{{ enterprise_catalog_celery_default_queue }}'
+
+# See edx_django_service_automated_users for an example of what this should be
+ENTERPRISE_CATALOG_AUTOMATED_USERS: {}
+
+# NOTE: These variables are only needed to create the demo site (e.g.
for sandboxes) +ENTERPRISE_CATALOG_LMS_URL_ROOT: !!null +ENTERPRISE_CATALOG_DISCOVERY_API_URL: !!null + +ENTERPRISE_CATALOG_CSRF_COOKIE_SECURE: false + +ENTERPRISE_CATALOG_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +enterprise_catalog_post_migrate_commands: [] + +ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'enterprise-catalog-sso-key' +ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'enterprise-catalog-sso-secret' +ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'enterprise-catalog-backend-service-key' +ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'enterprise-catalog-backend-service-secret' +ENTERPRISE_CATALOG_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +ENTERPRISE_CATALOG_GIT_IDENTITY: !!null + +ENTERPRISE_CATALOG_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: 'enterprise-catalog.git' + VERSION: '{{ ENTERPRISE_CATALOG_VERSION }}' + DESTINATION: "{{ enterprise_catalog_code_dir }}" + SSH_KEY: '{{ ENTERPRISE_CATALOG_GIT_IDENTITY }}' + +ENTERPRISE_CATALOG_SECRET_KEY: 'SET-ME-PLEASE' + +# Remote config +ENTERPRISE_CATALOG_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ENTERPRISE_CATALOG_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ENTERPRISE_CATALOG_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +ENTERPRISE_CATALOG_ENABLE_ADMIN_URLS_RESTRICTION: false +ENTERPRISE_CATALOG_ADMIN_URLS: + - admin + +# Worker settings +worker_django_settings_module: "{{ ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE }}" +ENTERPRISE_CATALOG_CELERY_WORKERS: + - queue: '{{ enterprise_catalog_celery_default_queue }}' + concurrency: 4 + monitor: True +enterprise_catalog_workers: "{{ ENTERPRISE_CATALOG_CELERY_WORKERS }}" diff --git a/playbooks/roles/enterprise_catalog/meta/main.yml b/playbooks/roles/enterprise_catalog/meta/main.yml new file mode 100644 index 00000000000..57255fe4748 --- /dev/null +++ b/playbooks/roles/enterprise_catalog/meta/main.yml @@ -0,0 +1,58 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role enterprise_catalog + +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ ENTERPRISE_CATALOG_USE_PYTHON38 }}' + edx_django_service_enable_experimental_docker_shim: '{{ ENTERPRISE_CATALOG_ENABLE_EXPERIMENTAL_DOCKER_SHIM }}' + edx_django_service_version: '{{ ENTERPRISE_CATALOG_VERSION }}' + edx_django_service_name: '{{ enterprise_catalog_service_name }}' + edx_django_service_config_overrides: '{{ enterprise_catalog_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ enterprise_catalog_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ enterprise_catalog_gunicorn_port }}' + edx_django_service_repos: '{{ ENTERPRISE_CATALOG_REPOS }}' + edx_django_service_django_settings_module: '{{ ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ enterprise_catalog_environment }}' + edx_django_service_gunicorn_extra: '{{ ENTERPRISE_CATALOG_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ ENTERPRISE_CATALOG_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ ENTERPRISE_CATALOG_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ ENTERPRISE_CATALOG_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ 
ENTERPRISE_CATALOG_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ ENTERPRISE_CATALOG_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ ENTERPRISE_CATALOG_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ ENTERPRISE_CATALOG_MEMCACHE }}' + edx_django_service_default_db_host: '{{ ENTERPRISE_CATALOG_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ ENTERPRISE_CATALOG_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ ENTERPRISE_CATALOG_MYSQL_USER }}' + edx_django_service_db_password: '{{ ENTERPRISE_CATALOG_MYSQL_PASSWORD }}' + edx_django_service_extra_apps: '{{ ENTERPRISE_CATALOG_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ ENTERPRISE_CATALOG_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ ENTERPRISE_CATALOG_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ enterprise_catalog_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ ENTERPRISE_CATALOG_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ ENTERPRISE_CATALOG_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ ENTERPRISE_CATALOG_COPY_CONFIG_ENABLED }}' + edx_django_service_migration_check_services: '{{ enterprise_catalog_service_name }},{{ enterprise_catalog_service_name }}_workers' + edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ enterprise_catalog_workers }}' + # Need this override because the existing AWS ASGs have a services tag with a name that doesn't match the convention + edx_django_service_workers_supervisor_conf: 'enterprise_catalog_worker.conf' + edx_django_service_docker_image_name: 'openedx/enterprise-catalog' + edx_django_service_hostname: '~^((stage|prod)-)?{{ enterprise_catalog_hostname }}.*' + EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ ENTERPRISE_CATALOG_ENABLE_ADMIN_URLS_RESTRICTION }}' + EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ ENTERPRISE_CATALOG_ADMIN_URLS }}' diff --git a/playbooks/roles/enterprise_catalog/tasks/main.yml b/playbooks/roles/enterprise_catalog/tasks/main.yml new file mode 100644 index 00000000000..3ffc9be1683 --- /dev/null +++ b/playbooks/roles/enterprise_catalog/tasks/main.yml @@ -0,0 +1,22 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role enterprise_catalog +# +# Overview: This role's tasks come from edx_django_service. 
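Because the role above delegates all of its work to edx_django_service through the meta/main.yml dependency, a deployment play only needs to include the role and supply variable overrides. A minimal sketch, using only variables defined in this diff (the secret value shown is illustrative; the default 'SET-ME-PLEASE' must be overridden):

- name: Deploy the enterprise catalog service
  hosts: all
  become: True
  roles:
    - role: enterprise_catalog
      ENTERPRISE_CATALOG_VERSION: 'master'
      ENTERPRISE_CATALOG_SECRET_KEY: 'not-the-real-key'  # illustrative value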
+#
+#
+# Dependencies:
+#
+#
+# Example play:
+#
+#
diff --git a/playbooks/roles/flower/defaults/main.yml b/playbooks/roles/flower/defaults/main.yml
new file mode 100644
index 00000000000..a83525c144c
--- /dev/null
+++ b/playbooks/roles/flower/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+
+# By default, point to the RabbitMQ broker running locally
+FLOWER_BROKER_USERNAME: "celery"
+FLOWER_BROKER_PASSWORD: "celery"
+FLOWER_BROKER_HOST: "127.0.0.1"
+FLOWER_BROKER_PORT: 5672
+FLOWER_ADDRESS: "0.0.0.0"
+FLOWER_PORT: "5555"
+FLOWER_BROKER_TRANSPORT: 'amqp'
+
+FLOWER_OAUTH2_KEY: "A Client ID from Google's OAUTH2 provider"
+FLOWER_OAUTH2_SECRET: "A Client Secret from Google's OAUTH2 provider"
+FLOWER_OAUTH2_REDIRECT: "A URL registered with Google's OAUTH2 provider"
+FLOWER_AUTH_REGEX: ".*@example.com" # Can be blank to disable auth
+# A list of user:password pairs separated by a comma to restrict flower access
+# using usernames and passwords
+FLOWER_BASIC_AUTH: []
+
+FLOWER_USER: "flower"
+flower_app_dir: "{{ COMMON_APP_DIR }}/{{ FLOWER_USER }}"
+flower_data_dir: "{{ COMMON_DATA_DIR }}/{{ FLOWER_USER }}"
+flower_log_dir: "{{ COMMON_LOG_DIR }}/{{ FLOWER_USER }}"
+flower_conf_dir: "{{ flower_app_dir }}"
+
+flower_venv_dir: "{{ flower_app_dir }}/venvs/flower"
+flower_venv_bin: "{{ flower_venv_dir }}/bin"
+flower_python_version: "python3.8"
+
+flower_python_reqs:
+# Celery version must match version used by edx-platform
+  - "flower==1.0.0"
+  - "celery==5.2.3"
+  - "redis==4.1.1"
+
+flower_deploy_path: "{{ flower_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/bin:/sbin:/bin"
+
+flower_broker: "{{ FLOWER_BROKER_TRANSPORT }}://{{ FLOWER_BROKER_USERNAME }}:{{ FLOWER_BROKER_PASSWORD }}@{{ FLOWER_BROKER_HOST }}:{{ FLOWER_BROKER_PORT }}"
+
+flower_environment:
+  PATH: "{{ flower_deploy_path }}"
diff --git a/playbooks/roles/flower/handlers/main.yml b/playbooks/roles/flower/handlers/main.yml
new file mode 100644
index 00000000000..2ba91d1c454
--- /dev/null
+++ b/playbooks/roles/flower/handlers/main.yml
@@ -0,0 +1,9 @@
+---
+- name: restart flower
+  supervisorctl:
+    state: restarted
+    supervisorctl_path: "{{ supervisor_ctl }}"
+    config: "{{ supervisor_cfg }}"
+    name: "{{ FLOWER_USER }}"
+  become: true
+  become_user: "{{ supervisor_service_user }}"
diff --git a/playbooks/roles/flower/meta/main.yml b/playbooks/roles/flower/meta/main.yml
new file mode 100644
index 00000000000..6d2cae542c5
--- /dev/null
+++ b/playbooks/roles/flower/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+  - common
+  - supervisor
diff --git a/playbooks/roles/flower/tasks/main.yml b/playbooks/roles/flower/tasks/main.yml
new file mode 100644
index 00000000000..965cc1e540f
--- /dev/null
+++ b/playbooks/roles/flower/tasks/main.yml
@@ -0,0 +1,78 @@
+---
+- name: Create application user
+  user:
+    name: "{{ FLOWER_USER }}"
+    home: "{{ flower_app_dir }}"
+    createhome: no
+    shell: /bin/false
+  notify:
+    - restart flower
+
+- name: Create flower user dirs
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ FLOWER_USER }}"
+    group: "{{ common_web_group }}"
+  notify:
+    - restart flower
+  with_items:
+    - "{{ flower_app_dir }}"
+    - "{{ flower_data_dir }}"
+    - "{{ flower_venv_dir }}"
+    - "{{ flower_log_dir }}"
+
+- name: Create flower environment script and configuration file
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    owner: "{{ FLOWER_USER }}"
+    group: "{{ common_web_group }}"
+    mode: "0640"
+  notify:
+    - restart flower
+  with_items:
+    - { src: 'edx/app/flower/flower_env.j2', dest: '{{ flower_app_dir }}/flower_env' }
+    - { 
src: 'edx/app/flower/flowerconfig.py.j2', dest: '{{ flower_conf_dir }}/flowerconfig.py' } + +- name: Create virtualenv and install Python requirements + pip: + name: "{{ item }}" + virtualenv: "{{ flower_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + virtualenv_python: "{{ flower_python_version }}" + become_user: "{{ FLOWER_USER }}" + environment: "{{ flower_environment }}" + with_items: "{{ flower_python_reqs }}" + notify: + - restart flower + +- name: Create supervisor configuration + template: + src: "edx/app/supervisor/conf.d.available/flower.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ FLOWER_USER }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_user }}" + mode: 0644 + become_user: "{{ supervisor_user }}" + notify: + - restart flower + +- name: Enable supervisor configuration + file: + src: "{{ supervisor_available_dir }}/{{ FLOWER_USER }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ FLOWER_USER }}.conf" + state: link + force: yes + become_user: "{{ supervisor_user }}" + notify: + - restart flower + +- name: Update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + register: supervisor_update + become_user: "{{ supervisor_service_user }}" + changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" + notify: + - restart flower diff --git a/playbooks/roles/flower/templates/edx/app/flower/flower_env.j2 b/playbooks/roles/flower/templates/edx/app/flower/flower_env.j2 new file mode 100644 index 00000000000..f3f2458a24e --- /dev/null +++ b/playbooks/roles/flower/templates/edx/app/flower/flower_env.j2 @@ -0,0 +1,6 @@ +# {{ ansible_managed }} +{% for name,value in flower_environment.items() %} +{%- if value %} +export {{ name }}="{{ value }}" +{%- endif %} +{% endfor %} diff --git a/playbooks/roles/flower/templates/edx/app/flower/flowerconfig.py.j2 b/playbooks/roles/flower/templates/edx/app/flower/flowerconfig.py.j2 new file mode 100644 index 00000000000..f7a7d5fd49f --- /dev/null +++ b/playbooks/roles/flower/templates/edx/app/flower/flowerconfig.py.j2 @@ -0,0 +1,8 @@ +# {{ ansible_managed }} +address = "{{ FLOWER_ADDRESS }}" +port = {{ FLOWER_PORT }} +oauth2_key = "{{ FLOWER_OAUTH2_KEY }}" +oauth2_secret = "{{ FLOWER_OAUTH2_SECRET }}" +oauth2_redirect_uri = "{{ FLOWER_OAUTH2_REDIRECT }}" +auth = "{{ FLOWER_AUTH_REGEX }}" +basic_auth = {{ FLOWER_BASIC_AUTH }} diff --git a/playbooks/roles/flower/templates/edx/app/supervisor/conf.d.available/flower.conf.j2 b/playbooks/roles/flower/templates/edx/app/supervisor/conf.d.available/flower.conf.j2 new file mode 100644 index 00000000000..746ce2c8d47 --- /dev/null +++ b/playbooks/roles/flower/templates/edx/app/supervisor/conf.d.available/flower.conf.j2 @@ -0,0 +1,7 @@ +[program:{{ FLOWER_USER }}] + +environment=PATH="{{ flower_deploy_path }}" +user={{ common_web_user }} +command={{ flower_venv_bin }}/celery --broker {{ flower_broker }} flower --conf={{ flower_conf_dir }}/flowerconfig.py +stdout_logfile={{ supervisor_log_dir }}/{{ FLOWER_USER }}-stdout.log +stderr_logfile={{ supervisor_log_dir }}/{{ FLOWER_USER }}-stderr.log diff --git a/playbooks/roles/forum/defaults/main.yml b/playbooks/roles/forum/defaults/main.yml index 9e6b8f82a0f..93228acc236 100644 --- a/playbooks/roles/forum/defaults/main.yml +++ b/playbooks/roles/forum/defaults/main.yml @@ -1,15 +1,17 @@ --- forum_app_dir: "{{ COMMON_APP_DIR }}/forum" forum_code_dir: "{{ forum_app_dir }}/cs_comments_service" +forum_binstubs_dir: "{{ forum_code_dir }}/bin" forum_data_dir: "{{ 
COMMON_DATA_DIR }}/forum" forum_rbenv_dir: "{{ forum_app_dir }}" forum_rbenv_root: "{{ forum_app_dir }}/.rbenv" forum_rbenv_shims: "{{ forum_rbenv_root }}/shims" forum_rbenv_bin: "{{ forum_rbenv_root }}/bin" +forum_gemfile: 'Gemfile3' forum_supervisor_wrapper: "{{ forum_app_dir }}/forum-supervisor.sh" forum_gem_root: "{{ forum_rbenv_dir }}/.gem" forum_gem_bin: "{{ forum_gem_root }}/bin" -forum_path: "{{ forum_code_dir }}/bin:{{ forum_rbenv_bin }}:{{ forum_rbenv_shims }}:{{ forum_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +forum_path: "{{ forum_binstubs_dir }}:{{ forum_rbenv_bin }}:{{ forum_rbenv_shims }}:{{ forum_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" FORUM_MONGO_USER: "cs_comments_service" FORUM_MONGO_PASSWORD: "password" @@ -18,19 +20,54 @@ FORUM_MONGO_HOSTS: FORUM_MONGO_TAGS: !!null FORUM_MONGO_PORT: "27017" FORUM_MONGO_DATABASE: "cs_comments_service" -FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?tags={{ FORUM_MONGO_TAGS }}{%- endif -%}" +FORUM_MONGO_AUTH_DB: "" +# Must be set if user credentials are provided. +# Can be one of :scram, :mongodb_cr, :mongodb_x509, :plain, or empty string "" if no credentials. +FORUM_MONGO_AUTH_MECH: ":scram" +FORUM_MONGO_USE_SSL: false +FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{ host }}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?tags={{ FORUM_MONGO_TAGS }}{%- endif -%}" FORUM_SINATRA_ENV: "development" FORUM_RACK_ENV: "development" FORUM_NGINX_PORT: "18080" +# FORUM_API_KEY must match EDXAPP_COMMENTS_SERVICE_KEY FORUM_API_KEY: "password" FORUM_ELASTICSEARCH_HOST: "localhost" FORUM_ELASTICSEARCH_PORT: "9200" -FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}" -FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key" -FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app" +FORUM_ELASTICSEARCH_PROTOCOL: "http" +FORUM_ELASTICSEARCH_URL: "{{ FORUM_ELASTICSEARCH_PROTOCOL }}://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}" +FORUM_ELASTICSEARCH_HOST_ES7: "localhost" +FORUM_ELASTICSEARCH_URL_ES7: "{{ FORUM_ELASTICSEARCH_PROTOCOL }}://{{ FORUM_ELASTICSEARCH_HOST_ES7 }}:{{ FORUM_ELASTICSEARCH_PORT }}" + +#Opensearch config +FORUM_OPENSEARCH_HOST: "localhost" +FORUM_OPENSEARCH_PORT: "9202" +FORUM_OPENSEARCH_PROTOCOL: "http" +FORUM_OPENSEARCH_URL: "{{ FORUM_OPENSEARCH_PROTOCOL }}://{{ FORUM_OPENSEARCH_HOST }}:{{ FORUM_OPENSEARCH_PORT }}" + +# This needs to be a string, set to 'false' to disable +FORUM_NEW_RELIC_ENABLE: '{{ COMMON_ENABLE_NEWRELIC_APP }}' +FORUM_NEW_RELIC_LICENSE_KEY: '{{ NEWRELIC_LICENSE_KEY | default("") }}' +FORUM_NEW_RELIC_APP_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-forum" + +# Datadog CONFIG +FORUM_DD_TAGS: "service:forum" + FORUM_WORKER_PROCESSES: "4" +FORUM_LISTEN_HOST: "0.0.0.0" +FORUM_LISTEN_PORT: "4567" +FORUM_USE_TCP: false + +# If forums fails to start because rake search:validate_index failed, +# wait this long before attempting to restart it +FORUM_RESTART_DELAY: 60 + +# Set to rebuild the forum ElasticSearch index from the database. 
+FORUM_REBUILD_INDEX: false -forum_environment: +FORUM_ENABLE_MONGODB_INDEXES: false + +forum_base_env: &forum_base_env + BUNDLE_GEMFILE: "{{ forum_gemfile }}" RBENV_ROOT: "{{ forum_rbenv_root }}" GEM_HOME: "{{ forum_gem_root }}" GEM_PATH: "{{ forum_gem_root }}" @@ -39,26 +76,40 @@ forum_environment: SINATRA_ENV: "{{ FORUM_SINATRA_ENV }}" API_KEY: "{{ FORUM_API_KEY }}" SEARCH_SERVER: "{{ FORUM_ELASTICSEARCH_URL }}" + SEARCH_SERVER_ES7: "{{ FORUM_ELASTICSEARCH_URL_ES7 }}" + SEARCH_SERVER_OS12: "{{ FORUM_OPENSEARCH_URL }}" MONGOHQ_URL: "{{ FORUM_MONGO_URL }}" + MONGOID_USE_SSL: "{{ FORUM_MONGO_USE_SSL }}" + MONGOID_AUTH_SOURCE: "{{ FORUM_MONGO_AUTH_DB }}" + MONGOID_AUTH_MECH: "{{ FORUM_MONGO_AUTH_MECH }}" HOME: "{{ forum_app_dir }}" + NEW_RELIC_ENABLE: "{{ FORUM_NEW_RELIC_ENABLE }}" NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}" NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}" WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}" DATA_DIR: "{{ forum_data_dir }}" + LISTEN_HOST: "{{ FORUM_LISTEN_HOST }}" + LISTEN_PORT: "{{ FORUM_LISTEN_PORT }}" + DD_TAGS: "{{ FORUM_DD_TAGS }}" + DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT: "true" + DD_TRACE_LOG_STREAM_HANDLER: "false" + +forum_env: + <<: *forum_base_env +devstack_forum_env: + <<: *forum_base_env + RACK_ENV: "development" + SINATRA_ENV: "development" + SEARCH_SERVER: "http://edx.devstack.elasticsearch:9200/" + SEARCH_SERVER_ES7: "http://edx.devstack.elasticsearch710:9200/" + SEARCH_SERVER_OS12: "http://edx.devstack.opensearch12:9202/" + MONGOHQ_URL: "mongodb://cs_comments_service:password@edx.devstack.mongo:27017/cs_comments_service" + MONGOID_AUTH_MECH: "{{ FORUM_MONGO_AUTH_MECH }}" forum_user: "forum" -forum_ruby_version: "1.9.3-p448" -forum_source_repo: "https://github.com/edx/cs_comments_service.git" -# Currently we are installing a branch of the comments service -# that configures unicorn to listen on a unix socket and get the -# worker count configuration from the environment. We are not -# merging to master of the comments service yet as this will have -# some incompatibilities with our Heroku deployments. 
-# -# https://github.com/edx/cs_comments_service/pull/83 -# -forum_version: "e0d/unicorn-config" -forum_unicorn_port: "4567" +FORUM_RUBY_VERSION: "3.0.4" +forum_source_repo: "https://github.com/openedx/cs_comments_service.git" +FORUM_VERSION: "master" # # test config @@ -71,4 +122,5 @@ forum_unicorn_port: "4567" # forum_services: - {service: "elasticsearch", host: "{{ FORUM_ELASTICSEARCH_HOST }}", port: "{{ FORUM_ELASTICSEARCH_PORT }}"} - + - {service: "elasticsearch7", host: "{{ FORUM_ELASTICSEARCH_HOST_ES7 }}", port: "{{ FORUM_ELASTICSEARCH_PORT }}"} + - {service: "opensearch12", host: "{{ FORUM_OPENSEARCH_HOST }}", port: "{{ FORUM_OPENSEARCH_PORT }}"} diff --git a/playbooks/roles/forum/handlers/main.yml b/playbooks/roles/forum/handlers/main.yml index dbd3eea5161..bfba1486f76 100644 --- a/playbooks/roles/forum/handlers/main.yml +++ b/playbooks/roles/forum/handlers/main.yml @@ -1,8 +1,8 @@ --- - name: restart the forum service - supervisorctl_local: > - name=forum - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - when: forum_installed is defined and not devstack + supervisorctl: + name: forum + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: forum_installed is defined and not disable_edx_services diff --git a/playbooks/roles/forum/meta/main.yml b/playbooks/roles/forum/meta/main.yml index cd115123c7b..218fa3618b5 100644 --- a/playbooks/roles/forum/meta/main.yml +++ b/playbooks/roles/forum/meta/main.yml @@ -1,9 +1,8 @@ --- dependencies: + - common - supervisor - role: rbenv - # TODO: setting the rbenv ownership to - # the common_web_user is a workaround - rbenv_user: "{{ common_web_user }}" + rbenv_user: "{{ forum_user }}" rbenv_dir: "{{ forum_app_dir }}" - rbenv_ruby_version: "{{ forum_ruby_version }}" + rbenv_ruby_version: "{{ FORUM_RUBY_VERSION }}" diff --git a/playbooks/roles/forum/tasks/deploy.yml b/playbooks/roles/forum/tasks/deploy.yml index 3fa400a024b..48ce1f407cd 100644 --- a/playbooks/roles/forum/tasks/deploy.yml +++ b/playbooks/roles/forum/tasks/deploy.yml @@ -1,36 +1,97 @@ --- - - name: create the supervisor config - template: > - src=forum.conf.j2 dest={{ supervisor_cfg_dir }}/forum.conf - owner={{ supervisor_user }} - mode=0644 - sudo_user: "{{ supervisor_user }}" - when: not devstack + template: + src: forum.conf.j2 + dest: "{{ supervisor_available_dir }}/forum.conf" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_user }}" + mode: 0644 + become_user: "{{ supervisor_user }}" + register: forum_supervisor + tags: + - install + - install:configuration + +- name: enable the supervisor config + file: + src: "{{ supervisor_available_dir }}/forum.conf" + dest: "{{ supervisor_cfg_dir }}/forum.conf" + owner: "{{ supervisor_user }}" + state: link + force: yes + mode: 0644 + become_user: "{{ supervisor_user }}" + when: not disable_edx_services register: forum_supervisor + tags: + - install + - install:configuration - name: create the supervisor wrapper - template: > - src={{ forum_supervisor_wrapper|basename }}.j2 - dest={{ forum_supervisor_wrapper }} - mode=0755 - sudo_user: "{{ forum_user }}" - when: not devstack + template: + src: "{{ forum_supervisor_wrapper|basename }}.j2" + dest: "{{ forum_supervisor_wrapper }}" + mode: 0755 + become_user: "{{ forum_user }}" + notify: restart the forum service + tags: + - install + - install:configuration + +- name: git checkout forum repo into {{ forum_code_dir }} + git: + dest: "{{ forum_code_dir }}" + repo: "{{ forum_source_repo 
}}" + version: "{{ FORUM_VERSION }}" + accept_hostkey: yes + become_user: "{{ forum_user }}" + register: forum_checkout notify: restart the forum service + tags: + - install + - install:code -- name: git checkout forum repo into {{ forum_code_dir }} - git: dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }} - sudo_user: "{{ forum_user }}" +- name: configure comments service bundle + shell: "bundle config set --local deployment 'true' chdir={{ forum_code_dir }}" + become_user: "{{ forum_user }}" + environment: "{{ forum_base_env }}" notify: restart the forum service + tags: + - install + - install:app-requirements -# TODO: This is done as the common_web_user -# since the process owner needs write access -# to the rbenv - name: install comments service bundle - shell: bundle install chdir={{ forum_code_dir }} - sudo_user: "{{ common_web_user }}" - environment: "{{ forum_environment }}" + shell: "bundle install chdir={{ forum_code_dir }}" + become_user: "{{ forum_user }}" + environment: "{{ forum_base_env }}" notify: restart the forum service + tags: + - install + - install:app-requirements + +- name: initialize elasticsearch + command: "{{ forum_code_dir }}/bin/rake search:initialize" + args: + chdir: "{{ forum_code_dir }}" + become_user: "{{ forum_user }}" + environment: "{{ forum_base_env }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: rebuild elasticsearch indices + command: "{{ forum_code_dir }}/bin/rake search:rebuild_indices" + args: + chdir: "{{ forum_code_dir }}" + become_user: "{{ forum_user }}" + environment: "{{ forum_base_env }}" + when: migrate_db is defined and migrate_db|lower == "yes" and FORUM_REBUILD_INDEX|bool + run_once: yes + tags: + - migrate + - migrate:db # call supervisorctl update. 
this reloads # the supervisorctl config and restarts @@ -40,17 +101,42 @@ - name: update supervisor configuration shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" register: supervisor_update - changed_when: supervisor_update.stdout != "" - when: not devstack + changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" + when: not disable_edx_services + tags: + - manage + - manage:update - name: ensure forum is started - supervisorctl_local: > - name=forum - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - when: not devstack + supervisorctl: + name: forum + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + when: not disable_edx_services + tags: + - manage + +- name: ensure indexes on forum mongo db + command: "{{ forum_code_dir }}/bin/rake db:init" + args: + chdir: "{{ forum_code_dir }}" + become_user: "{{ forum_user }}" + environment: "{{ forum_base_env }}" + when: FORUM_ENABLE_MONGODB_INDEXES + run_once: yes + tags: + - manage + - manage:db + +- include: test.yml + tags: + - deploy -- include: test.yml tags=deploy +- include: tag_ec2.yml + when: COMMON_TAG_EC2_INSTANCE + tags: + - deploy -- set_fact: forum_installed=true +- set_fact: + forum_installed: true diff --git a/playbooks/roles/forum/tasks/main.yml b/playbooks/roles/forum/tasks/main.yml index 2f25d4b7104..3db6947fc77 100644 --- a/playbooks/roles/forum/tasks/main.yml +++ b/playbooks/roles/forum/tasks/main.yml @@ -1,5 +1,4 @@ --- - # forum # # Dependencies: @@ -18,34 +17,90 @@ # - role: rbenv # rbenv_user: "{{ forum_user }}" # rbenv_dir: "{{ forum_rbenv_dir }}" -# rbenv_ruby_version: "{{ forum_ruby_version }}" +# rbenv_ruby_version: "{{ FORUM_RUBY_VERSION }}" # - forum - name: create application user - user: > - name="{{ forum_user }}" home="{{ forum_app_dir }}" - createhome=no - shell=/bin/false + user: + name: "{{ forum_user }}" + home: "{{ forum_app_dir }}" + createhome: yes + shell: /bin/false + generate_ssh_key: yes notify: restart the forum service + tags: + - install + - install:base -- name: create forum app dir - file: > - path="{{ forum_app_dir }}" state=directory - owner="{{ forum_user }}" group="{{ common_web_group }}" +# Ensure the directory is accessible to the web service +- name: set forum app dir permissions + file: + path: "{{ forum_app_dir }}" + state: directory + owner: "{{ forum_user }}" + group: "{{ common_web_group }}" notify: restart the forum service + tags: + - install + - install:base + + +- name: write devstack script + template: + src: "devstack.sh.j2" + dest: "{{ forum_app_dir }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: devstack is defined and devstack + tags: + - devstack + - devstack:install + -- name: setup the forum env - template: > - src=forum_env.j2 dest={{ forum_app_dir }}/forum_env - owner={{ forum_user }} group={{ common_web_user }} - mode=0644 +- name: setup the forum env for stage/prod + template: + src: forum_env.j2 + dest: "{{ forum_app_dir }}/forum_env" + owner: "{{ forum_user }}" + group: "{{ common_web_user }}" + mode: 0644 notify: - restart the forum service + tags: + - install + - install:base + - install:configuration + with_items: + - "{{ forum_env }}" + +- name: setup the forum env for devstack + template: + src: forum_env.j2 + dest: "{{ forum_app_dir }}/devstack_forum_env" + owner: "{{ forum_user }}" + group: "{{ common_web_user }}" + mode: 0644 + notify: + - restart the forum service + tags: + - 
install + - install:base + when: devstack is defined and devstack + with_items: + - "{{ devstack_forum_env }}" - name: create {{ forum_data_dir }} - file: > - path={{ forum_data_dir }} state=directory - owner="{{ common_web_user }}" group="{{ common_web_group }}" - mode=0777 - -- include: deploy.yml tags=deploy \ No newline at end of file + file: + path: "{{ forum_data_dir }}" + state: directory + owner: "{{ common_web_user }}" + group: "{{ common_web_group }}" + mode: 0777 + tags: + - install + - install:base + +- include: deploy.yml + tags: + - deploy diff --git a/playbooks/roles/forum/tasks/tag_ec2.yml b/playbooks/roles/forum/tasks/tag_ec2.yml new file mode 100644 index 00000000000..c064d55e798 --- /dev/null +++ b/playbooks/roles/forum/tasks/tag_ec2.yml @@ -0,0 +1,11 @@ +--- +- name: get instance information + action: ec2_metadata_facts + +- name: tag instance + ec2_tag: + resource: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + tags: + "version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after }}" + when: forum_checkout.after is defined diff --git a/playbooks/roles/forum/tasks/test.yml b/playbooks/roles/forum/tasks/test.yml index c9bedc5e160..201713257cb 100644 --- a/playbooks/roles/forum/tasks/test.yml +++ b/playbooks/roles/forum/tasks/test.yml @@ -1,11 +1,16 @@ --- - - name: test that the required service are listening - wait_for: port={{ item.port }} host={{ item.host }} timeout=30 - with_items: forum_services - when: not devstack + wait_for: + port: "{{ item.port }}" + host: "{{ item.host }}" + timeout: 30 + with_items: "{{ forum_services }}" + when: not disable_edx_services - name: test that mongo replica set members are listing - wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30 - with_items: FORUM_MONGO_HOSTS - when: not devstack \ No newline at end of file + wait_for: + port: "{{ FORUM_MONGO_PORT }}" + host: "{{ item }}" + timeout: 30 + with_items: "{{ FORUM_MONGO_HOSTS }}" + when: not disable_edx_services diff --git a/playbooks/roles/forum/templates/cs_comments_service.conf.j2 b/playbooks/roles/forum/templates/cs_comments_service.conf.j2 index 9022fdc623e..523261bf436 100644 --- a/playbooks/roles/forum/templates/cs_comments_service.conf.j2 +++ b/playbooks/roles/forum/templates/cs_comments_service.conf.j2 @@ -10,7 +10,7 @@ env PID=/var/tmp/comments_service.pid chdir {{ forum_code_dir }} script - . {{forum_app_dir}}/forum_env - {{forum_app_dir}}/.rbenv/shims/ruby app.rb + . 
{{ forum_app_dir }}/forum_env + {{ forum_app_dir }}/.rbenv/shims/ruby app.rb end script diff --git a/playbooks/roles/forum/templates/devstack.sh.j2 b/playbooks/roles/forum/templates/devstack.sh.j2 new file mode 100644 index 00000000000..ab025fa7256 --- /dev/null +++ b/playbooks/roles/forum/templates/devstack.sh.j2 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ forum_app_dir }}/devstack_forum_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + cd {{ forum_code_dir }} + + /bin/bash + ;; + exec) + shift + + cd {{ forum_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/forum/templates/forum-supervisor.sh.j2 b/playbooks/roles/forum/templates/forum-supervisor.sh.j2 index 0894a3f344b..e7add35e1b6 100644 --- a/playbooks/roles/forum/templates/forum-supervisor.sh.j2 +++ b/playbooks/roles/forum/templates/forum-supervisor.sh.j2 @@ -5,6 +5,14 @@ cd {{ forum_code_dir }} {% if devstack %} {{ forum_rbenv_shims }}/ruby app.rb +{% elif FORUM_USE_TCP %} +{{ forum_binstubs_dir }}/unicorn -c config/unicorn_tcp.rb -I '.' {% else %} -{{ forum_gem_bin }}/unicorn -c config/unicorn.rb +{{ forum_binstubs_dir }}/unicorn -c config/unicorn.rb -I '.' {% endif %} + +# If forums fails to start because elasticsearch isn't migrated, sleep so supervisord +# doesn't attempt to restart it immediately. +# 101 is the magic exit code forums uses to mean "rake search:validate_index failed" +exit_code="$?" +[ "$exit_code" -eq 101 ] && sleep {{ FORUM_RESTART_DELAY }} && exit "$exit_code" diff --git a/playbooks/roles/forum/templates/forum.conf.j2 b/playbooks/roles/forum/templates/forum.conf.j2 index 350991a48c5..ba3d9ff0ee2 100644 --- a/playbooks/roles/forum/templates/forum.conf.j2 +++ b/playbooks/roles/forum/templates/forum.conf.j2 @@ -2,7 +2,8 @@ command={{ forum_supervisor_wrapper }} priority=999 user={{ common_web_user }} -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log killasgroup=true stopasgroup=true +stopsignal=QUIT diff --git a/playbooks/roles/forum/templates/forum_env.j2 b/playbooks/roles/forum/templates/forum_env.j2 index 015f82401f4..b1bf3c91089 100644 --- a/playbooks/roles/forum/templates/forum_env.j2 +++ b/playbooks/roles/forum/templates/forum_env.j2 @@ -1,9 +1,9 @@ # {{ ansible_managed }} -{% for name,value in forum_environment.items() -%} +{% for name,value in item.items() -%} {%- if value -%} export {{ name }}="{{ value }}" -{% endif %} +{% endif %} {%- endfor %} eval "$(rbenv init -)" diff --git a/playbooks/roles/gerrit/defaults/main.yml b/playbooks/roles/gerrit/defaults/main.yml deleted file mode 100644 index f44f9dd9b7f..00000000000 --- a/playbooks/roles/gerrit/defaults/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -## -# Defaults for role gerrit -# - -gerrit_app_dir: "{{ COMMON_APP_DIR }}/gerrit" -gerrit_data_dir: "{{ COMMON_DATA_DIR }}/gerrit" - -gerrit_debian_pkgs: - - python-mysqldb - - python-boto -gerrit_release: 2.8.1 -gerrit_user: 
gerrit2 -gerrit_db_name: reviewdb -gerrit_http_port: 8080 -gerrit_sshd_port: 29418 -gerrit_jre_path: /usr/lib/jvm/java-7-oracle/jre -gerrit_java_exe_path: "{{ gerrit_jre_path }}/bin/java" -gerrit_repo_volume_os_device: /dev/xvdf diff --git a/playbooks/roles/gerrit/handlers/main.yml b/playbooks/roles/gerrit/handlers/main.yml deleted file mode 100644 index 1e3889c4222..00000000000 --- a/playbooks/roles/gerrit/handlers/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role gerrit - -- name: gerrit restarted - service: name=gerrit state=restarted - -- name: nginx restarted - service: name=nginx state=restarted diff --git a/playbooks/roles/gerrit/meta/main.yml b/playbooks/roles/gerrit/meta/main.yml deleted file mode 100644 index 09ad458b118..00000000000 --- a/playbooks/roles/gerrit/meta/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -dependencies: - - role: oraclejdk - - role: nginx - nginx_sites: [] diff --git a/playbooks/roles/gerrit/tasks/main.yml b/playbooks/roles/gerrit/tasks/main.yml deleted file mode 100644 index 625bf39bda6..00000000000 --- a/playbooks/roles/gerrit/tasks/main.yml +++ /dev/null @@ -1,169 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role gerrit -# -# Overview: Installs and configures Gerrit on the server. Requires -# several secure variables to be defined that are not defined in this -# role. -# -# -# Dependencies: -# - An existing running database. -# - An S3 bucket containing all of the necessary plugin jars. 
-# - In addition to the variables defined in defaults/main.yml, the following variables must be defined: -# -# gerrit_github_client_id: alskdjdfkjasdjfsdlfkj -# gerrit_github_client_secret: 0938908450deffaaa87665a555a6fc6de5777f77f -# gerrit_db_hostname: somedb.88374jhyehf.us-east-1.rds.amazonaws.com -# gerrit_db_admin_username: adminuser -# gerrit_db_admin_password: adminpassword -# gerrit_db_password: gerrituserpassword -# gerrit_artifact_s3_bucket: -# name: some-s3-bucket -# aws_access_key_id: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" -# aws_secret_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" -# gerrit_hostname: "gerrit.example.com" -# gerrit_smtp_enabled: false -# gerrit_email: gerrit@example.com -# gerrit_smtp_server: smtp.example.com -# gerrit_smtp_encryption: none -# gerrit_smtp_user: someuser -# gerrit_smtp_pass: somepassword -# -# -# Example play: -# -# - name: Deploy gerrit -# hosts: gerrit -# gather_facts: True -# sudo: True -# roles: -# - gerrit - -- name: system package pre-requisites installed - apt: pkg={{ item }} - with_items: gerrit_debian_pkgs - -- name: user - user: name={{ gerrit_user }} system=yes home={{ gerrit_app_dir }} createhome=no - -- name: directories created - file: path={{ item }} mode=700 owner={{ gerrit_user }} state=directory - with_items: - - "{{ gerrit_app_dir }}" - - "{{ gerrit_app_dir }}/etc" - - "{{ gerrit_data_dir }}" - -- name: repository volume fs exists - shell: file -s {{ gerrit_repo_volume_os_device }} | grep ext4 - ignore_errors: yes - register: is_formatted - -- name: repository volume formatted - command: mkfs -t ext4 {{ gerrit_repo_volume_os_device }} - when: is_formatted | failed - -- name: fstab includes repository volume - lineinfile: > - dest=/etc/fstab - regexp="^{{ gerrit_repo_volume_os_device }}\s" - line="{{ gerrit_repo_volume_os_device }} {{ gerrit_data_dir }} ext4 defaults 0 2" - -# output will look roughly like: -# /dev/foo on /some/mount/point type ext4 (options) -- name: repository volume is mounted - shell: > - mount -l | grep '^{{ gerrit_repo_volume_os_device }} ' - ignore_errors: yes - register: is_mounted - -- name: repository volume mounted - command: mount {{ gerrit_repo_volume_os_device }} - when: is_mounted | failed - -- name: war file downloaded - s3: > - bucket={{ gerrit_artifact_s3_bucket.name }} - object=gerrit-{{ gerrit_release }}.war - dest=/tmp/gerrit.war - mode=get - aws_access_key="{{ gerrit_artifact_s3_bucket.aws_access_key_id }}" - aws_secret_key="{{ gerrit_artifact_s3_bucket.aws_secret_access_key }}" - sudo_user: "{{ gerrit_user }}" - -- name: database created - mysql_db: > - name={{ gerrit_db_name }} - encoding=utf8 - login_host={{ gerrit_db_hostname }} login_user={{ gerrit_db_admin_username }} login_password={{ gerrit_db_admin_password }} - register: db_created - -- name: database user created - mysql_user: > - name={{ gerrit_user }} - password={{ gerrit_db_password }} - host='%' - priv="{{ gerrit_db_name }}.*:ALL" - login_host={{ gerrit_db_hostname }} login_user={{ gerrit_db_admin_username }} login_password={{ gerrit_db_admin_password }} - -- name: configuration uploaded - template: src=gerrit.config.j2 dest={{ gerrit_app_dir }}/etc/gerrit.config mode=600 - sudo_user: "{{ gerrit_user }}" - notify: gerrit restarted - -- name: initialized - command: > - {{ gerrit_java_exe_path }} -jar /tmp/gerrit.war init -d {{ gerrit_app_dir }} --batch --no-auto-start - creates={{ gerrit_app_dir }}/bin - sudo_user: "{{ gerrit_user }}" - notify: gerrit restarted - -- name: artifacts installed from s3 - s3: > - 
bucket={{ gerrit_artifact_s3_bucket.name }} - object={{ item.jar }} - dest={{ item.dest }}/{{ item.jar }} - mode=get - aws_access_key="{{ gerrit_artifact_s3_bucket.aws_access_key_id }}" - aws_secret_key="{{ gerrit_artifact_s3_bucket.aws_secret_access_key }}" - sudo_user: "{{ gerrit_user }}" - notify: gerrit restarted - with_items: - - { jar: "github-oauth-{{ gerrit_release }}.jar", dest: "{{ gerrit_app_dir }}/lib" } - - { jar: "github-plugin-{{ gerrit_release }}.jar", dest: "{{ gerrit_app_dir }}/plugins" } - - { jar: "singleusergroup-{{ gerrit_release }}.jar", dest: "{{ gerrit_app_dir }}/plugins" } - -- name: plugins installed from war - shell: unzip -p /tmp/gerrit.war WEB-INF/plugins/replication.jar > {{ gerrit_app_dir }}/plugins/replication.jar creates={{ gerrit_app_dir }}/plugins/replication.jar - sudo_user: "{{ gerrit_user }}" - notify: gerrit restarted - -- name: setup ngnix vhost - template: > - src=nginx-gerrit.j2 - dest={{ nginx_sites_available_dir }}/gerrit - -- name: enable gerrit vhost - file: > - src={{ nginx_sites_available_dir }}/gerrit - dest={{ nginx_sites_enabled_dir }}/gerrit - state=link - notify: nginx restarted - -- name: init script configured - template: src=gerritcodereview.j2 dest=/etc/default/gerritcodereview mode=644 - -- name: init script installed - file: src={{ gerrit_app_dir }}/bin/gerrit.sh dest=/etc/init.d/gerrit state=link - -- name: starts on boot - service: name=gerrit enabled=yes diff --git a/playbooks/roles/gerrit/templates/gerrit.config.j2 b/playbooks/roles/gerrit/templates/gerrit.config.j2 deleted file mode 100644 index ed509810c8c..00000000000 --- a/playbooks/roles/gerrit/templates/gerrit.config.j2 +++ /dev/null @@ -1,41 +0,0 @@ -# {{ ansible_managed }} - -[gerrit] - basePath = {{ gerrit_data_dir }} - canonicalWebUrl = http://{{ gerrit_hostname }}/ -[database] - type = MYSQL - hostname = {{ gerrit_db_hostname }} - database = {{ gerrit_db_name }} - username = {{ gerrit_user }} - password = {{ gerrit_db_password }} -[auth] - type = HTTP - httpHeader = GITHUB_USER - loginUrl = /login - logoutUrl = /oauth/reset -[sendemail] - enable = {{ gerrit_smtp_enabled }} - smtpServer = {{ gerrit_smtp_server }} - smtpEncryption = {{ gerrit_smtp_encryption }} - smtpUser = {{ gerrit_smtp_user }} - smtpPass = {{ gerrit_smtp_pass }} -[container] - user = {{ gerrit_user }} - javaHome = {{ gerrit_jre_path }} -[sshd] - listenAddress = *:{{ gerrit_sshd_port }} -[httpd] - listenUrl = http://*:{{ gerrit_http_port }}/ - filterClass = com.googlesource.gerrit.plugins.github.oauth.OAuthFilter -[cache] - directory = cache -[github] - url = https://github.com - clientId = {{ gerrit_github_client_id }} - clientSecret = {{ gerrit_github_client_secret }} -[user] - email = {{ gerrit_email }} - anonymousCoward = Anonymous User -[suggest] - accounts = true diff --git a/playbooks/roles/gerrit/templates/gerritcodereview.j2 b/playbooks/roles/gerrit/templates/gerritcodereview.j2 deleted file mode 100644 index ce698590149..00000000000 --- a/playbooks/roles/gerrit/templates/gerritcodereview.j2 +++ /dev/null @@ -1 +0,0 @@ -export GERRIT_SITE={{ gerrit_app_dir }} diff --git a/playbooks/roles/gerrit/templates/nginx-gerrit.j2 b/playbooks/roles/gerrit/templates/nginx-gerrit.j2 deleted file mode 100644 index fe51dda5952..00000000000 --- a/playbooks/roles/gerrit/templates/nginx-gerrit.j2 +++ /dev/null @@ -1,11 +0,0 @@ -server { - listen 80; - server_name {{ gerrit_hostname }}; - - location / { - proxy_pass http://localhost:{{ gerrit_http_port }}; - proxy_set_header Host $host; - proxy_set_header 
X-Real-IP $remote_addr;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-    }
-}
\ No newline at end of file
diff --git a/playbooks/roles/gh_mirror/defaults/main.yml b/playbooks/roles/gh_mirror/defaults/main.yml
index 6b9a6da28ee..900a6d5ec0d 100644
--- a/playbooks/roles/gh_mirror/defaults/main.yml
+++ b/playbooks/roles/gh_mirror/defaults/main.yml
@@ -1,10 +1,10 @@
 #
 # edX Configuration
 #
-# github: https://github.com/edx/configuration
-# wiki: https://github.com/edx/configuration/wiki
-# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
-# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
 #
 #
 #
diff --git a/playbooks/roles/gh_mirror/files/repos_from_orgs.py b/playbooks/roles/gh_mirror/files/repos_from_orgs.py
index a1db309a555..1c6a28d6e7b 100644
--- a/playbooks/roles/gh_mirror/files/repos_from_orgs.py
+++ b/playbooks/roles/gh_mirror/files/repos_from_orgs.py
@@ -24,12 +24,12 @@ def check_running(run_type=''):
     fp = open(pid_file, 'w')
     try:
         fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
-    except IOError:
+    except OSError:
         # another instance is running
         sys.exit(0)
 
 def run_cmd(cmd):
-    logging.debug('running: {}\n'.format(cmd))
+    logging.debug(f'running: {cmd}\n')
     process = subprocess.Popen(
         cmd, stdout=subprocess.PIPE,
         stderr=subprocess.PIPE, shell=True)
@@ -49,9 +49,9 @@ def refresh_cache():
     path = dirname(abspath(__file__))
     try:
         with open(join(path, 'orgs.yml')) as f:
-            orgs = yaml.load(f)
-    except IOError:
-        print "Unable to read {}/orgs.yml, does it exist?".format(path)
+            orgs = yaml.safe_load(f)
+    except OSError:
+        print(f"Unable to read {path}/orgs.yml, does it exist?")
         sys.exit(1)
 
     repos = []
@@ -59,7 +59,7 @@ def refresh_cache():
     for org in orgs:
         page = 1
         while True:
-            r = requests.get('https://api.github.com/users/%7B%7D/repos?page={}'.format(org, page))
+            r = requests.get(f'https://api.github.com/users/%7Borg%7D/repos?page={page}')
             org_data = r.json()
             # request pages until we get zero results
             if not isinstance(org_data, list) or len(org_data) == 0:
@@ -80,12 +80,12 @@ def update_repos():
     for repo in repos:
         repo_path = os.path.join(args.datadir, repo['org'], repo['name'] + '.git')
         if not os.path.exists(repo_path):
-            run_cmd('mkdir -p {}'.format(repo_path))
+            run_cmd(f'mkdir -p {repo_path}')
             run_cmd('git clone --mirror {} {}'.format(repo['html_url'], repo_path))
-            run_cmd('cd {} && git update-server-info'.format(repo_path))
+            run_cmd(f'cd {repo_path} && git update-server-info')
         else:
-            run_cmd('cd {} && git fetch --all --tags'.format(repo_path))
-            run_cmd('cd {} && git update-server-info'.format(repo_path))
+            run_cmd(f'cd {repo_path} && git fetch --all --tags')
+            run_cmd(f'cd {repo_path} && git update-server-info')
 
 if __name__ == '__main__':
     args = parse_args()
@@ -97,7 +97,7 @@ def update_repos():
     else:
         check_running()
     if not args.datadir:
-        print "Please specificy a repository directory"
+        print("Please specify a repository directory")
         sys.exit(1)
     if not os.path.exists('/var/tmp/repos.json'):
         refresh_cache()
diff --git a/playbooks/roles/gh_mirror/meta/main.yml b/playbooks/roles/gh_mirror/meta/main.yml
index 107f1e98c29..6d2cae542c5 100644
--- a/playbooks/roles/gh_mirror/meta/main.yml
+++ 
b/playbooks/roles/gh_mirror/meta/main.yml @@ -1,3 +1,4 @@ --- dependencies: + - common - supervisor diff --git a/playbooks/roles/gh_mirror/tasks/main.yml b/playbooks/roles/gh_mirror/tasks/main.yml index 2880886b4e0..e0802f98b0a 100644 --- a/playbooks/roles/gh_mirror/tasks/main.yml +++ b/playbooks/roles/gh_mirror/tasks/main.yml @@ -1,10 +1,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -30,37 +30,39 @@ --- - name: install pip packages pip: name={{ item }} state=present - with_items: gh_mirror_pip_pkgs + with_items: "{{ gh_mirror_pip_pkgs }}" - name: install debian packages - apt: > - pkg={{ ",".join(gh_mirror_debian_pkgs) }} - state=present - update_cache=yes + apt: + pkg: '{{ ",".join(gh_mirror_debian_pkgs) }}' + state: present + update_cache: yes - name: create gh_mirror user - user: > - name={{ gh_mirror_user }} - state=present + user: + name: "{{ gh_mirror_user }}" + state: present - name: create the gh_mirror data directory - file: > - path={{ gh_mirror_data_dir }} - state=directory - owner={{ gh_mirror_user }} - group={{ gh_mirror_group }} + file: + path: "{{ gh_mirror_data_dir }}" + state: directory + owner: "{{ gh_mirror_user }}" + group: "{{ gh_mirror_group }}" - name: create the gh_mirror app directory - file: > - path={{ gh_mirror_app_dir }} - state=directory + file: + path: "{{ gh_mirror_app_dir }}" + state: directory - name: create org config template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml - name: copying sync scripts - copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }} - with_items: gh_mirror_app_files + copy: + src: "{{ item }}" + dest: "{{ gh_mirror_app_dir }}/{{ item }}" + with_items: "{{ gh_mirror_app_files }}" - name: creating cron job to update repos cron: diff --git a/playbooks/roles/gh_users/defaults/main.yml b/playbooks/roles/gh_users/defaults/main.yml deleted file mode 100644 index e46a72266d1..00000000000 --- a/playbooks/roles/gh_users/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ -# override this var to add a prefix to the prompt -# also need to set commont_update_bashrc for to -# update the system bashrc default -GH_USERS_PROMPT: "" -gh_users: [] -gh_users_no_sudo: [] diff --git a/playbooks/roles/gh_users/tasks/main.yml b/playbooks/roles/gh_users/tasks/main.yml deleted file mode 100644 index 0e55b81a7f1..00000000000 --- a/playbooks/roles/gh_users/tasks/main.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- - -# gh_users -# -# Creates OS accounts for users based on their github credential. 
-# Takes a list gh_users as a parameter which is a list of users
-#
-# roles:
-#   - role: gh_users
-#     gh_users:
-#       - joe
-#       - mark
-#     gh_users_no_sudo:
-#       - tourist_dave
-
-
-- name: creating default .bashrc
-  template: >
-    src=default.bashrc.j2 dest=/etc/skel/.bashrc
-    mode=0644 owner=root group=root
-
-- name: create gh group
-  group: name=gh state=present
-
-# TODO: give limited sudo access to this group
-- name: grant full sudo access to gh group
-  copy: >
-    content="%gh ALL=(ALL) NOPASSWD:ALL"
-    dest=/etc/sudoers.d/gh owner=root group=root
-    mode=0440 validate='visudo -cf %s'
-
-- name: create sudo github users
-  user:
-    name={{ item }} groups=gh
-    shell=/bin/bash
-  with_items: gh_users
-
-- name: create non-sudo github users
-  user:
-    name={{ item }}
-    shell=/bin/bash
-  with_items: gh_users_no_sudo
-
-- name: create .ssh directory
-  file:
-    path=/home/{{ item }}/.ssh state=directory mode=0700
-    owner={{ item }}
-  with_items: gh_users + gh_users_no_sudo
-
-- name: copy github key[s] to .ssh/authorized_keys
-  get_url:
-    url=https://github.com/{{ item }}.keys
-    dest=/home/{{ item }}/.ssh/authorized_keys mode=0600
-    owner={{ item }}
-  with_items: gh_users + gh_users_no_sudo
-
diff --git a/playbooks/roles/ghost/defaults/main.yml b/playbooks/roles/ghost/defaults/main.yml
new file mode 100644
index 00000000000..ff9f7b42ff1
--- /dev/null
+++ b/playbooks/roles/ghost/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+GHOST_VERSION: 1.0.48
+ghost_package_name: gh-ost
+ghost_download_target: "/tmp/{{ ghost_package_name }}_{{ GHOST_VERSION }}.deb"
+ghost_package_url: https://github.com/github/gh-ost/releases/download/v{{ GHOST_VERSION }}/gh-ost_{{ GHOST_VERSION }}_amd64.deb
diff --git a/playbooks/roles/ghost/tasks/main.yml b/playbooks/roles/ghost/tasks/main.yml
new file mode 100644
index 00000000000..bbfcc29cf8c
--- /dev/null
+++ b/playbooks/roles/ghost/tasks/main.yml
@@ -0,0 +1,27 @@
+- name: Check if package is installed and version is correct
+  # Use the shell module here: the pipeline would not be interpreted by the command module
+  shell: dpkg -s gh-ost | grep Version | cut -d ':' -f 3 | grep {{ GHOST_VERSION }}
+  register: ghost_correct
+  ignore_errors: True
+
+- name: Remove package if it exists
+  apt:
+    name: "{{ ghost_package_name }}"
+    state: absent
+  when: ghost_correct.rc != 0
+
+- name: Download package
+  get_url:
+    url="{{ ghost_package_url }}"
+    dest="{{ ghost_download_target }}"
+  when: ghost_correct.rc != 0
+
+- name: Install ghost_package
+  apt: deb="{{ ghost_download_target }}"
+  become: true
+  when: ghost_correct.rc != 0
+
+- name: "Install percona packages for dropping large tables"
+  apt:
+    name:
+      - "percona-toolkit"
+    state: "present"
diff --git a/playbooks/roles/git_clone/defaults/main.yml b/playbooks/roles/git_clone/defaults/main.yml
new file mode 100644
index 00000000000..8b727eca79e
--- /dev/null
+++ b/playbooks/roles/git_clone/defaults/main.yml
@@ -0,0 +1,34 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+# Example play:
+#
+# Rather than being included in the play, this role
+# is included as a dependency by other roles in the meta/main.yml
+# file. The including role should add the following
+# dependency definition. 
+#
+# dependencies:
+#   - role: git_clone
+#     repo_owner: edx-themes
+#     repo_group: edx-themes
+#     git_home: /edx/etc/edx-themes
+#     GIT_REPOS:
+#       - PROTOCOL: ssh
+#         DOMAIN: github.com
+#         PATH: edx
+#         REPO: sample-themes.git
+#         VERSION: release
+#         DESTINATION: /edx/etc/edx-themes/edx-themes
+#         SSH_KEY: "{{ THEMES_GIT_IDENTITY }}"
+#
+##
+# Defaults for role git_clone
+
+GIT_CLONE_NO_LOGGING: true
diff --git a/playbooks/roles/git_clone/meta/main.yml b/playbooks/roles/git_clone/meta/main.yml
new file mode 100644
index 00000000000..98c26a7a0ee
--- /dev/null
+++ b/playbooks/roles/git_clone/meta/main.yml
@@ -0,0 +1,15 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Role includes for role git_clone
+#
+
+# Allow this role to be duplicated in dependencies
+allow_duplicates: yes
diff --git a/playbooks/roles/git_clone/tasks/main.yml b/playbooks/roles/git_clone/tasks/main.yml
new file mode 100644
index 00000000000..e05ab4cdd05
--- /dev/null
+++ b/playbooks/roles/git_clone/tasks/main.yml
@@ -0,0 +1,143 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role git_clone
+#
+# Overview:
+#
+# This role performs the repetitive tasks that most edX roles
+# require in our default configuration.
+#
+#
+# Rewrite this task using the ansible git-config module once we migrate to Ansible 2.x
+# https://docs.ansible.com/ansible/git_config_module.html#git-config
+
+# On devstack, tell Git that repos owned by other users are safe.
+# This is necessary in git 2.35.2 and higher. Devstack uses a mix of
+# root and edxapp and git+https pip dependencies end up cloning repos
+# into an open-ended set of directories, so our best bet is to just
+# say every dir on devstack is safe.
+- name: Mark all directories as safe for git on devstack
+  shell: "git config --global --add safe.directory '*'"
+  become: true
+  when: "({{ devstack | default(False) }} or {{ edx_django_service_is_devstack | default(False) }})"
+  tags:
+    - devstack
+
+- name: Set git fetch.prune to ignore deleted remote refs
+  shell: git config --global fetch.prune true
+  become_user: "{{ repo_owner }}"
+  no_log: "{{ GIT_CLONE_NO_LOGGING }}"
+  when: repo_owner is defined and GIT_REPOS|length > 0
+  tags:
+    - install
+    - install:code
+
+- name: Validate git protocol
+  fail:
+    msg: '{{ GIT_REPOS.PROTOCOL }} must be "https" or "ssh"'
+  when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined
+  with_items: "{{ GIT_REPOS }}"
+  no_log: "{{ GIT_CLONE_NO_LOGGING }}"
+  tags:
+    - install
+    - install:code
+
+
+- name: Install read-only ssh key
+  copy:
+    dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
+    content: "{{ item.SSH_KEY }}"
+    owner: "{{ repo_owner }}"
+    group: "{{ repo_group }}"
+    mode: "0600"
+  when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
+  with_items: "{{ GIT_REPOS }}"
+  no_log: "{{ GIT_CLONE_NO_LOGGING }}"
+  tags:
+    - install
+    - install:code
+
+- name: Check that working tree is clean
+  shell: test ! 
-e "{{ item }}" || git -C "{{ item }}" status --porcelain --untracked-files=no + register: dirty_files + become_user: "{{ repo_owner }}" + # Using the map here means that the items will only be the DESTINATION strings, + # rather than the full GIT_REPOS structures, which have data we don't want to log, + # so we don't have to use no_log on this task. + with_items: "{{ GIT_REPOS | map(attribute='DESTINATION') | list }}" + +- name: Require a clean working tree + fail: + msg: "There are modified files in {{ item.item }}: {{ item.stdout }}" + when: item.stdout|length > 0 + with_items: "{{ dirty_files.results }}" + +- name: Checkout code over ssh + git: + repo: "git@{{ item.DOMAIN }}:{{ item.PATH }}/{{ item.REPO }}" + dest: "{{ item.DESTINATION }}" + version: "{{ item.VERSION }}" + accept_hostkey: yes + key_file: "{{ git_home }}/.ssh/{{ item.REPO }}" + depth: 1 + become_user: "{{ repo_owner }}" + register: code_checkout + when: item.PROTOCOL == "ssh" and GIT_REPOS is defined + with_items: "{{ GIT_REPOS }}" + no_log: "{{ GIT_CLONE_NO_LOGGING }}" + tags: + - install + - install:code + +- name: Checkout code over https + git: + repo: "https://{{ item.DOMAIN }}/{{ item.PATH }}/{{ item.REPO }}" + dest: "{{ item.DESTINATION }}" + version: "{{ item.VERSION }}" + depth: 1 + become_user: "{{ repo_owner }}" + register: code_checkout + when: item.PROTOCOL == "https" and GIT_REPOS is defined + with_items: "{{ GIT_REPOS }}" + no_log: "{{ GIT_CLONE_NO_LOGGING }}" + tags: + - install + - install:code + +- name: Remove read-only ssh key + file: + dest: "{{ git_home }}/.ssh/{{ item.REPO }}" + state: absent + when: item.PROTOCOL == "ssh" and GIT_REPOS is defined + with_items: "{{ GIT_REPOS }}" + no_log: "{{ GIT_CLONE_NO_LOGGING }}" + tags: + - install + - install:code + +- name: Run git safe.directory + shell: git config --global --add safe.directory {{ item.DESTINATION }} + become: true + with_items: "{{ GIT_REPOS }}" + no_log: "{{ GIT_CLONE_NO_LOGGING }}" + tags: + - install + - install:code + +- name: Run git clean after checking out code + shell: cd {{ item.DESTINATION }} && git clean -xdf + become: true + with_items: "{{ GIT_REPOS }}" + no_log: "{{ GIT_CLONE_NO_LOGGING }}" + tags: + - install + - install:code diff --git a/playbooks/roles/gitreload/defaults/main.yml b/playbooks/roles/gitreload/defaults/main.yml new file mode 100644 index 00000000000..4205b7e99a4 --- /dev/null +++ b/playbooks/roles/gitreload/defaults/main.yml @@ -0,0 +1,45 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role gitreload +# + +GITRELOAD_COURSE_CHECKOUT: false +GITRELOAD_GIT_IDENTITY: !!null +GITRELOAD_REPOS: + - name: "edx-demo-course" + url: "https://github.com/openedx/edx-demo-course.git" + commit: "master" +GITRELOAD_REPODIR: "{{ EDXAPP_GIT_REPO_DIR }}" +GITRELOAD_LOG_LEVEL: "debug" +GITRELOAD_HOSTS: + - github.com +GITRELOAD_NUM_THREADS: 1 +GITRELOAD_NGINX_PORT: "18095" +GITRELOAD_GUNICORN_EXTRA: "" +GITRELOAD_GUNICORN_EXTRA_CONF: "" + +GITRELOAD_VERSION: "master" +gitreload_dir: "{{ COMMON_APP_DIR }}/gitreload" +gitreload_user: "gitreload" +gitreload_repo: "https://github.com/mitodl/gitreload.git" +gitreload_gunicorn_port: "8095" +gitreload_venv: "{{ gitreload_dir }}/venvs/gitreload" +gitreload_venv_bin: "{{ 
gitreload_venv }}/bin" +gitreload_gunicorn_workers: 1 +gitreload_gunicorn_host: "127.0.0.1" + +gitreload_env: + REPODIR: "{{ GITRELOAD_REPODIR }}" + LOG_LEVEL: "{{ GITRELOAD_LOG_LEVEL }}" + NUM_THREADS: "{{ GITRELOAD_NUM_THREADS }}" + VIRTUAL_ENV: "{{ edxapp_venv_dir }}" + EDX_PLATFORM: "{{ edxapp_code_dir }}" + DJANGO_SETTINGS: "{{ edxapp_settings }}" diff --git a/playbooks/roles/gitreload/handlers/main.yml b/playbooks/roles/gitreload/handlers/main.yml new file mode 100644 index 00000000000..0737e499e88 --- /dev/null +++ b/playbooks/roles/gitreload/handlers/main.yml @@ -0,0 +1,23 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Handlers for role gitreload +# +# Overview: +# +# +- name: restart gitreload + supervisorctl: + name: gitreload + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: not disable_edx_services diff --git a/playbooks/roles/gitreload/meta/main.yml b/playbooks/roles/gitreload/meta/main.yml new file mode 100644 index 00000000000..5ce35da7024 --- /dev/null +++ b/playbooks/roles/gitreload/meta/main.yml @@ -0,0 +1,15 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role gitreload +# +dependencies: + - common + - supervisor diff --git a/playbooks/roles/gitreload/tasks/course_pull.yml b/playbooks/roles/gitreload/tasks/course_pull.yml new file mode 100644 index 00000000000..60ec6696144 --- /dev/null +++ b/playbooks/roles/gitreload/tasks/course_pull.yml @@ -0,0 +1,34 @@ +# Tasks to run if cloning repos to edx-platform. 
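+#
+# Illustrative sketch only (not part of the role): with the default
+# GITRELOAD_REPOS entry above, the import task below expands to a command
+# of roughly this shape, run from the edx-platform checkout:
+#
+#   SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms \
+#     --settings={{ edxapp_settings }} git_add_course \
+#     https://github.com/openedx/edx-demo-course.git \
+#     {{ GITRELOAD_REPODIR }}/edx-demo-course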
+
+- name: clone all course repos
+  git: dest={{ GITRELOAD_REPODIR }}/{{ item.name }} repo={{ item.url }} version={{ item.commit }}
+  become_user: "{{ common_web_user }}"
+  with_items: "{{ GITRELOAD_REPOS }}"
+
+- name: do import of courses
+  shell: "SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms --settings={{ edxapp_settings }} git_add_course {{ item.url }} {{ GITRELOAD_REPODIR }}/{{ item.name }}"
+  args:
+    executable: "/bin/bash"
+    chdir: "{{ edxapp_code_dir }}"
+  become_user: "{{ common_web_user }}"
+  with_items: "{{ GITRELOAD_REPOS }}"
+
+- name: change ownership on repos for access by edxapp and www-data
+  file:
+    path: "{{ GITRELOAD_REPODIR }}"
+    state: directory
+    owner: "{{ common_web_user }}"
+    group: "{{ common_web_group }}"
+    recurse: yes
+
+- name: change group on repos if using devstack
+  file:
+    path: "{{ GITRELOAD_REPODIR }}"
+    state: directory
+    group: "{{ edxapp_user }}"
+    recurse: yes
+  when: devstack
+
+- name: change mode on repos when using devstack
+  command: "chmod -R u=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }}"
+  when: devstack
diff --git a/playbooks/roles/gitreload/tasks/deploy.yml b/playbooks/roles/gitreload/tasks/deploy.yml
new file mode 100644
index 00000000000..9ef1f48a3d8
--- /dev/null
+++ b/playbooks/roles/gitreload/tasks/deploy.yml
@@ -0,0 +1,77 @@
+- name: create ssh dir for the content repos key
+  file:
+    path: "~/.ssh"
+    state: "directory"
+    mode: "0700"
+  become_user: "{{ common_web_user }}"
+
+- name: install ssh key for the content repos
+  copy:
+    content: "{{ GITRELOAD_GIT_IDENTITY }}"
+    dest: "~/.ssh/id_rsa"
+    mode: "0600"
+  become_user: "{{ common_web_user }}"
+
+- include: course_pull.yml
+  when: GITRELOAD_COURSE_CHECKOUT|bool
+  tags: course_pull
+
+- name: install gitreload
+  pip:
+    name: "git+{{ gitreload_repo }}@{{ GITRELOAD_VERSION }}#egg=gitreload"
+    virtualenv: "{{ gitreload_venv }}"
+    extra_args: "--exists-action w"
+  become_user: "{{ gitreload_user }}"
+  notify: restart gitreload
+
+- name: copy configuration
+  template:
+    src: "edx/app/gitreload/gr.env.json.j2"
+    dest: "{{ gitreload_dir }}/gr.env.json"
+  become_user: "{{ gitreload_user }}"
+  notify: restart gitreload
+
+- name: "add gunicorn configuration file"
+  template:
+    src: "edx/app/gitreload/gitreload_gunicorn.py.j2"
+    dest: "{{ gitreload_dir }}/gitreload_gunicorn.py"
+  become_user: "{{ gitreload_user }}"
+  notify: restart gitreload
+
+- name: "writing supervisor script"
+  template:
+    src: "edx/app/supervisor/conf.available.d/gitreload.conf.j2"
+    dest: "{{ supervisor_available_dir }}/gitreload.conf"
+    owner: "{{ supervisor_user }}"
+    group: "{{ common_web_user }}"
+    mode: "0644"
+
+- name: "enable supervisor script"
+  file:
+    src: "{{ supervisor_available_dir }}/gitreload.conf"
+    dest: "{{ supervisor_cfg_dir }}/gitreload.conf"
+    owner: "{{ supervisor_user }}"
+    group: "{{ common_web_user }}"
+    mode: "0644"
+    state: link
+    force: "yes"
+  when: not disable_edx_services
+
+  # call supervisorctl update. this reloads
+  # the supervisorctl config and restarts
+  # the services if any of the configurations
+  # have changed.
+  #
+- name: update supervisor configuration
+  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
+  register: supervisor_update
+  changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
+  when: not disable_edx_services
+
+- name: ensure gitreload is started
+  supervisorctl:
+    name: gitreload
+    supervisorctl_path: "{{ supervisor_ctl }}"
+    config: "{{ supervisor_cfg }}"
+    state: started
+  when: not disable_edx_services
diff --git a/playbooks/roles/gitreload/tasks/main.yml b/playbooks/roles/gitreload/tasks/main.yml
new file mode 100644
index 00000000000..00c67abadd9
--- /dev/null
+++ b/playbooks/roles/gitreload/tasks/main.yml
@@ -0,0 +1,84 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role gitreload
+#
+# Overview: Installs gitreload for reloading courses via GitHub webhooks
+#
+#
+# Dependencies: supervisor, common
+#
+#
+# Example playbook:
+#
+# - hosts: all
+#   become: True
+#   gather_facts: True
+#   vars:
+#     COMMON_ENABLE_BASIC_AUTH: True
+#   roles:
+#     - gitreload
+#     - role: nginx
+#       nginx_sites:
+#         - gitreload
+#

+# grab edxapp vars without being dependent on its tasks
+- include_vars: ../../edxapp/defaults/main.yml
+  tags:
+    - course_pull
+    - deploy
+
+- name: create gitreload user
+  user:
+    name: "{{ gitreload_user }}"
+    home: "{{ gitreload_dir }}"
+    createhome: no
+    shell: /bin/false
+
+- name: ensure home folder exists
+  file:
+    path: "{{ gitreload_dir }}"
+    state: directory
+    owner: "{{ gitreload_user }}"
+    group: "{{ gitreload_user }}"
+
+- name: ensure repo dir exists
+  file:
+    path: "{{ GITRELOAD_REPODIR }}"
+    state: directory
+    owner: "{{ common_web_user }}"
+    group: "{{ common_web_group }}"
+
+- name: grab ssh host keys
+  shell: ssh-keyscan {{ item }}
+  become_user: "{{ common_web_user }}"
+  with_items: "{{ GITRELOAD_HOSTS }}"
+  register: gitreload_repo_host_keys
+
+- name: add host keys if needed to known_hosts
+  lineinfile:
+    create: yes
+    dest: ~/.ssh/known_hosts
+    line: "{{ item.stdout }}"
+  become_user: "{{ common_web_user }}"
+  with_items: "{{ gitreload_repo_host_keys.results }}"
+
+- name: create a symlink for venv python
+  file:
+    src: "{{ gitreload_venv_bin }}/{{ item }}"
+    dest: "{{ COMMON_BIN_DIR }}/{{ item }}.gitreload"
+    state: "link"
+  with_items:
+    - python
+    - pip
+
+- include: deploy.yml tags=deploy
diff --git a/playbooks/roles/gitreload/templates/edx/app/gitreload/gitreload_gunicorn.py.j2 b/playbooks/roles/gitreload/templates/edx/app/gitreload/gitreload_gunicorn.py.j2
new file mode 100644
index 00000000000..159d9a093e7
--- /dev/null
+++ b/playbooks/roles/gitreload/templates/edx/app/gitreload/gitreload_gunicorn.py.j2
@@ -0,0 +1,20 @@
+"""
+gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html
+
+{{ ansible_managed }}
+"""
+import multiprocessing
+
+preload_app = True
+timeout = 10
+bind = "{{ gitreload_gunicorn_host }}:{{ gitreload_gunicorn_port }}"
+limit_request_field_size = 16384
+
+workers = {{ gitreload_gunicorn_workers }}
+
+{{ common_close_all_caches }}
+
+def post_fork(server, worker):
+    close_all_caches()
+
+{{ GITRELOAD_GUNICORN_EXTRA_CONF }}
diff --git a/playbooks/roles/gitreload/templates/edx/app/gitreload/gr.env.json.j2 b/playbooks/roles/gitreload/templates/edx/app/gitreload/gr.env.json.j2 new file
mode 100644 index 00000000000..23015ff629b --- /dev/null +++ b/playbooks/roles/gitreload/templates/edx/app/gitreload/gr.env.json.j2 @@ -0,0 +1 @@ +{{ gitreload_env | to_nice_json }} diff --git a/playbooks/roles/gitreload/templates/edx/app/supervisor/conf.available.d/gitreload.conf.j2 b/playbooks/roles/gitreload/templates/edx/app/supervisor/conf.available.d/gitreload.conf.j2 new file mode 100644 index 00000000000..b50fb9b00b3 --- /dev/null +++ b/playbooks/roles/gitreload/templates/edx/app/supervisor/conf.available.d/gitreload.conf.j2 @@ -0,0 +1,13 @@ +[program:gitreload] + +user={{ common_web_user }} +directory={{ gitreload_dir }} +umask=002 + +command={{ gitreload_venv }}/bin/gunicorn -c {{ gitreload_dir }}/gitreload_gunicorn.py {{ GITRELOAD_GUNICORN_EXTRA }} gitreload.web:app + +environment=PID=/var/tmp/gitreload.pid +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/gluster/tasks/main.yml b/playbooks/roles/gluster/tasks/main.yml index c057cbcc30d..3fcc6986bb2 100644 --- a/playbooks/roles/gluster/tasks/main.yml +++ b/playbooks/roles/gluster/tasks/main.yml @@ -24,7 +24,7 @@ # Ignoring error below so that we can move the data folder and have it be a link - name: all | create folders file: path={{ item.path }} state=directory - with_items: gluster_volumes + with_items: "{{ gluster_volumes }}" when: > "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}" ignore_errors: yes @@ -32,52 +32,52 @@ - name: primary | create peers command: gluster peer probe {{ item }} - with_items: gluster_peers + with_items: "{{ gluster_peers }}" when: ansible_default_ipv4.address == gluster_primary_ip tags: gluster - name: primary | create volumes command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %} - with_items: gluster_volumes + with_items: "{{ gluster_volumes }}" when: ansible_default_ipv4.address == gluster_primary_ip ignore_errors: yes # There should be better error checking here tags: gluster - name: primary | start volumes command: gluster volume start {{ item.name }} - with_items: gluster_volumes + with_items: "{{ gluster_volumes }}" when: ansible_default_ipv4.address == gluster_primary_ip ignore_errors: yes # There should be better error checking here tags: gluster - name: primary | set security command: gluster volume set {{ item.name }} auth.allow {{ item.security }} - with_items: gluster_volumes + with_items: "{{ gluster_volumes }}" when: ansible_default_ipv4.address == gluster_primary_ip tags: gluster - name: primary | set performance cache command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }} - with_items: gluster_volumes + with_items: "{{ gluster_volumes }}" when: ansible_default_ipv4.address == gluster_primary_ip tags: gluster - name: all | mount volume - mount: > - name={{ item.mount_location }} - src={{ gluster_primary_ip }}:{{ item.name }} - fstype=glusterfs - state=mounted - opts=defaults,_netdev - with_items: gluster_volumes + mount: + name: "{{ item.mount_location }}" + src: "{{ gluster_primary_ip }}:{{ item.name }}" + fstype: glusterfs + state: mounted + opts: defaults,_netdev + with_items: "{{ gluster_volumes }}" tags: gluster # This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system # before the network stack is up and can't lookup 127.0.0.1 
 - name: all | sleep mount
-  lineinfile: >
-    dest=/etc/rc.local
-    line='sleep 5; /bin/mount -a'
-    regexp='sleep 5; /bin/mount -a'
-    insertbefore='exit 0'
+  lineinfile:
+    dest: /etc/rc.local
+    line: 'sleep 5; /bin/mount -a'
+    regexp: 'sleep 5; /bin/mount -a'
+    insertbefore: 'exit 0'
   tags: gluster
diff --git a/playbooks/roles/go-agent-docker-server/README.rst b/playbooks/roles/go-agent-docker-server/README.rst
new file mode 100644
index 00000000000..12e60520edb
--- /dev/null
+++ b/playbooks/roles/go-agent-docker-server/README.rst
@@ -0,0 +1,6 @@
+In order to use this role you must use a specific set of AMIs
+#############################################################
+
+`This role is for use with the AWS ECS AMIs listed here`_
+
+.. _This role is for use with the AWS ECS AMIs listed here: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html
diff --git a/playbooks/roles/go-agent-docker-server/defaults/main.yml b/playbooks/roles/go-agent-docker-server/defaults/main.yml
new file mode 100644
index 00000000000..b1a3213d3a1
--- /dev/null
+++ b/playbooks/roles/go-agent-docker-server/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+# Defaults for role go-agent-docker-server
+#
+
+# key for go-agents to autoregister with the go-server
+GO_SERVER_AUTO_REGISTER_KEY: "dev-only-override-this-key"
+GO_AGENT_DOCKER_RESOURCES: "tubular,python"
+GO_AGENT_DOCKER_ENVIRONMENT: "sandbox"
+GO_AGENT_DOCKER_CONF_HOME: "/tmp/go-agent/conf"
\ No newline at end of file
diff --git a/playbooks/roles/go-agent-docker-server/tasks/main.yml b/playbooks/roles/go-agent-docker-server/tasks/main.yml
new file mode 100644
index 00000000000..93e507bb50a
--- /dev/null
+++ b/playbooks/roles/go-agent-docker-server/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role go-agent-docker-server
+#
+# Overview:
+#
+# Writes the auto-registration configuration consumed by Docker-based GoCD agents.
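+#
+# With the defaults above, the rendered autoregister.properties looks like
+# this (illustrative; the template itself appears at the end of this role):
+#
+#   agent.auto.register.key=dev-only-override-this-key
+#   agent.auto.register.resources=tubular,python
+#   agent.auto.register.environments=sandbox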
+#
+# Dependencies:
+#   - openjdk7
+#
+# Example play:
+#
+# - name: Configure instance(s)
+#   hosts: go-agent-docker-server
+#   become: True
+#   vars_files:
+#     - "{{ secure_dir }}/admin/sandbox.yml"
+#   gather_facts: True
+#   roles:
+#     - common
+#

+- name: install go-agent auto-registration configuration
+  template:
+    src: edx/app/go-agent-docker-server/autoregister.properties.j2
+    dest: "{{ GO_AGENT_DOCKER_CONF_HOME }}/autoregister.properties"
+    mode: 0600
+    owner: root
+    group: root
+
diff --git a/playbooks/roles/go-agent-docker-server/templates/edx/app/go-agent-docker-server/autoregister.properties.j2 b/playbooks/roles/go-agent-docker-server/templates/edx/app/go-agent-docker-server/autoregister.properties.j2
new file mode 100644
index 00000000000..87f29e89256
--- /dev/null
+++ b/playbooks/roles/go-agent-docker-server/templates/edx/app/go-agent-docker-server/autoregister.properties.j2
@@ -0,0 +1,3 @@
+agent.auto.register.key={{ GO_SERVER_AUTO_REGISTER_KEY }}
+agent.auto.register.resources={{ GO_AGENT_DOCKER_RESOURCES }}
+agent.auto.register.environments={{ GO_AGENT_DOCKER_ENVIRONMENT }}
\ No newline at end of file
diff --git a/playbooks/roles/grafana/.gitignore b/playbooks/roles/grafana/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/playbooks/roles/grafana/defaults/main.yml b/playbooks/roles/grafana/defaults/main.yml
new file mode 100644
index 00000000000..153f73094ba
--- /dev/null
+++ b/playbooks/roles/grafana/defaults/main.yml
@@ -0,0 +1,501 @@
+---
+
+grafana_apt_pkg: grafana
+
+GRAFANA_NGINX_PORT: 80
+
+# Application mode.
+grafana_app_mode: "production"
+
+# Data paths.
+grafana_paths_data: "/var/lib/grafana"
+grafana_paths_logs: "/var/log/grafana"
+grafana_paths_plugins: "{{ grafana_paths_data }}/plugins"
+
+grafana_paths:
+  data: "{{ grafana_paths_data }}"
+  logs: "{{ grafana_paths_logs }}"
+  plugins: "{{ grafana_paths_plugins }}"
+
+# Server basics.
+grafana_server_protocol: "http"
+grafana_server_http_addr: "127.0.0.1"
+grafana_server_http_port: 3000
+grafana_server_domain: "localhost"
+grafana_server_enforce_domain: false
+grafana_server_root_url: "%(protocol)s://%(domain)s:%(http_port)s/"
+grafana_server_router_logging: false
+grafana_server_static_root_path: "public"
+grafana_server_enable_gzip: false
+grafana_server_cert_file: ""
+grafana_server_cert_key: ""
+
+grafana_server:
+  protocol: "{{ grafana_server_protocol }}"
+  http_addr: "{{ grafana_server_http_addr }}"
+  http_port: "{{ grafana_server_http_port }}"
+  domain: "{{ grafana_server_domain }}"
+  enforce_domain: "{{ grafana_server_enforce_domain }}"
+  root_url: "{{ grafana_server_root_url }}"
+  router_logging: "{{ grafana_server_router_logging }}"
+  static_root_path: "{{ grafana_server_static_root_path }}"
+  enable_gzip: "{{ grafana_server_enable_gzip }}"
+  cert_file: "{{ grafana_server_cert_file }}"
+  cert_key: "{{ grafana_server_cert_key }}"
+
+# Database.
+grafana_database_type: "sqlite3" +grafana_database_host: 127.0.0.1:3306 +grafana_database_name: "grafana" +grafana_database_user: "root" +grafana_database_password: "" +grafana_database_ssl_mode: "disable" +grafana_database_ca_cert_path: "" +grafana_database_client_key_path: "" +grafana_database_client_cert_path: "" +grafana_database_server_cert_name: "" +grafana_database_path: "grafana.db" + +grafana_database: + type: "{{ grafana_database_type }}" + host: "{{ grafana_database_host }}" + name: "{{ grafana_database_name }}" + user: "{{ grafana_database_user }}" + password: "{{ grafana_database_password }}" + ssl_mode: "{{ grafana_database_ssl_mode }}" + ca_cert_path: "{{ grafana_database_ca_cert_path }}" + client_key_path: "{{ grafana_database_client_key_path }}" + client_cert_path: "{{ grafana_database_client_cert_path }}" + server_cert_name: "{{ grafana_database_server_cert_name }}" + path: "{{ grafana_database_path }}" + +# Sessions. +grafana_session_provider: "file" +grafana_session_provider_config: "sessions" +grafana_session_cookie_name: "grafana_sess" +grafana_session_cookie_secure: false +grafana_session_session_life_time: 86400 +grafana_session_gc_interval_time: 86400 + +grafana_session: + provider: "{{ grafana_session_provider }}" + provider_config: "{{ grafana_session_provider_config }}" + cookie_name: "{{ grafana_session_cookie_name }}" + cookie_secure: "{{ grafana_session_cookie_secure }}" + session_life_time: "{{ grafana_session_session_life_time }}" + gc_interval_time: "{{ grafana_session_gc_interval_time }}" + +# Analytics. +grafana_analytics_reporting_enabled: false +grafana_analytics_check_for_update: true +grafana_analytics_google_analytics_ua_id: "" +grafana_analytics_google_tag_manager_id: "" + +grafana_analytics: + reporting_enabled: "{{ grafana_analytics_reporting_enabled }}" + check_for_update: "{{ grafana_analytics_check_for_update }}" + google_analytics_ua_id: "{{ grafana_analytics_google_analytics_ua_id }}" + google_tag_manager_id: "{{ grafana_analytics_google_tag_manager_id }}" + +# Security. +grafana_security_admin_user: "admin" +grafana_security_admin_password: "CHANGEME" +grafana_security_secret_key: "CHANGEME" +grafana_security_login_remember_days: 7 +grafana_security_cookie_username: "grafana_user" +grafana_security_cookie_remember_name: "grafana_remember" +grafana_security_disable_gravatar: false +grafana_security_data_source_proxy_whitelist: "" + +grafana_security: + admin_user: "{{ grafana_security_admin_user }}" + admin_password: "{{ grafana_security_admin_password }}" + secret_key: "{{ grafana_security_secret_key }}" + login_remember_days: "{{ grafana_security_login_remember_days }}" + cookie_username: "{{ grafana_security_cookie_username }}" + cookie_remember_name: "{{ grafana_security_cookie_remember_name }}" + disable_gravatar: "{{ grafana_security_disable_gravatar }}" + data_source_proxy_whitelist: "{{ grafana_security_data_source_proxy_whitelist }}" + +# Snapshots. 
+grafana_snapshots_external_enabled: false +grafana_snapshots_external_snapshot_url: "https://snapshots-origin.raintank.io/" +grafana_snapshots_external_snapshot_name: "Publish to snapshot.raintank.io" +grafana_snapshots_snapshot_remove_expired: true +grafana_snapshots_snapshot_TTL_days: "7" + +grafana_snapshots: + external_enabled: "{{ grafana_snapshots_external_enabled }}" + external_snapshot_url: "{{ grafana_snapshots_external_snapshot_url }}" + external_snapshot_name: "{{ grafana_snapshots_external_snapshot_name }}" + snapshot_remove_expired: "{{ grafana_snapshots_snapshot_remove_expired }}" + snapshot_TTL_days: "{{ grafana_snapshots_snapshot_TTL_days }}" + +# Users. +grafana_users_allow_sign_up: true +grafana_users_allow_org_create: true +grafana_users_auto_assign_org: true +grafana_users_auto_assign_org_role: "Viewer" +grafana_users_verify_email_enabled: false +grafana_users_login_hint: "email or username" +grafana_users_default_theme: "dark" + +grafana_users: + allow_sign_up: "{{ grafana_users_allow_sign_up }}" + allow_org_create: "{{ grafana_users_allow_org_create }}" + auto_assign_org: "{{ grafana_users_auto_assign_org }}" + auto_assign_org_role: "{{ grafana_users_auto_assign_org_role }}" + verify_email_enabled: "{{ grafana_users_verify_email_enabled }}" + login_hint: "{{ grafana_users_login_hint }}" + default_theme: "{{ grafana_users_default_theme }}" + +# Authorization. +grafana_auth_disable_login_form: false + +grafana_auth: + disable_login_form: "{{ grafana_auth_disable_login_form }}" + +# Authorization (anonymous). +grafana_auth__anonymous_enabled: false +grafana_auth__anonymous_org_name: "Main Org." +grafana_auth__anonymous_org_role: "Viewer" + +grafana_auth__anonymous: + enabled: "{{ grafana_auth__anonymous_enabled }}" + org_name: "{{ grafana_auth__anonymous_org_name }}" + org_role: "{{ grafana_auth__anonymous_org_role }}" + +# Authorization (GitHub). +grafana_auth__github_enabled: false +grafana_auth__github_allow_sign_up: true +grafana_auth__github_client_id: "some_id" +grafana_auth__github_client_secret: "some_secret" +grafana_auth__github_scopes: "user:email" +grafana_auth__github_auth_url: "https://github.com/login/oauth/authorize" +grafana_auth__github_token_url: "https://github.com/login/oauth/access_token" +grafana_auth__github_api_url: "https://api.github.com/user" +grafana_auth__github_team_ids: "" +grafana_auth__github_allowed_organizations: "" + +grafana_auth__github: + enabled: "{{ grafana_auth__github_enabled }}" + allow_sign_up: "{{ grafana_auth__github_allow_sign_up }}" + client_id: "{{ grafana_auth__github_client_id }}" + client_secret: "{{ grafana_auth__github_client_secret }}" + scopes: "{{ grafana_auth__github_scopes }}" + auth_url: "{{ grafana_auth__github_auth_url }}" + token_url: "{{ grafana_auth__github_token_url }}" + api_url: "{{ grafana_auth__github_api_url }}" + team_ids: "{{ grafana_auth__github_team_ids }}" + allowed_organizations: "{{ grafana_auth__github_allowed_organizations }}" + +# Authorization (Google). 
+grafana_auth__google_enabled: false +grafana_auth__google_allow_sign_up: true +grafana_auth__google_client_id: "some_client_id" +grafana_auth__google_client_secret: "some_client_secret" +grafana_auth__google_scopes: "https://www.googleapis.com/auth/userinfo.profile%20https://www.googleapis.com/auth/userinfo.email" +grafana_auth__google_auth_url: "https://accounts.google.com/o/oauth2/auth" +grafana_auth__google_token_url: "https://accounts.google.com/o/oauth2/token" +grafana_auth__google_api_url: "https://www.googleapis.com/oauth2/v1/userinfo" +grafana_auth__google_allowed_domains: "" +grafana_auth__google_hosted_domain: false + +grafana_auth__google: + enabled: "{{ grafana_auth__google_enabled }}" + allow_sign_up: "{{ grafana_auth__google_allow_sign_up }}" + client_id: "{{ grafana_auth__google_client_id }}" + client_secret: "{{ grafana_auth__google_client_secret }}" + scopes: "{{ grafana_auth__google_scopes }}" + auth_url: "{{ grafana_auth__google_auth_url }}" + token_url: "{{ grafana_auth__google_token_url }}" + api_url: "{{ grafana_auth__google_api_url }}" + allowed_domains: "{{ grafana_auth__google_allowed_domains }}" + hosted_domain: "{{ grafana_auth__google_hosted_domain }}" + +# Authorization (Grafananet). +grafana_auth__grafananet_enabled: false +grafana_auth__grafananet_allow_sign_up: true +grafana_auth__grafananet_client_id: "some_client_id" +grafana_auth__grafananet_client_secret: "some_client_secret" +grafana_auth__grafananet_scopes: "user:email" +grafana_auth__grafananet_allowed_organizations: "" +grafana_auth__grafananet_hosted_domain: false + +grafana_auth__grafananet: + enabled: "{{ grafana_auth__grafananet_enabled }}" + allow_sign_up: "{{ grafana_auth__grafananet_allow_sign_up }}" + client_id: "{{ grafana_auth__grafananet_client_id }}" + client_secret: "{{ grafana_auth__grafananet_client_secret }}" + scopes: "{{ grafana_auth__grafananet_scopes }}" + allowed_organizations: "{{ grafana_auth__grafananet_allowed_organizations }}" + +# Authorization (generic OAuth). +grafana_auth__generic_oauth_enabled: false +grafana_auth__generic_oauth_allow_sign_up: true +grafana_auth__generic_oauth_client_id: "some_id" +grafana_auth__generic_oauth_client_secret: "some_secret" +grafana_auth__generic_oauth_scopes: "user:email" +grafana_auth__generic_oauth_auth_url: "" +grafana_auth__generic_oauth_token_url: "" +grafana_auth__generic_oauth_api_url: "" +grafana_auth__generic_oauth_team_ids: "" +grafana_auth__generic_oauth_allowed_organizations: "" + +grafana_auth__generic_oauth: + enabled: "{{ grafana_auth__generic_oauth_enabled }}" + allow_sign_up: "{{ grafana_auth__generic_oauth_allow_sign_up }}" + client_id: "{{ grafana_auth__generic_oauth_client_id }}" + client_secret: "{{ grafana_auth__generic_oauth_client_secret }}" + scopes: "{{ grafana_auth__generic_oauth_scopes }}" + auth_url: "{{ grafana_auth__generic_oauth_auth_url }}" + token_url: "{{ grafana_auth__generic_oauth_token_url }}" + api_url: "{{ grafana_auth__generic_oauth_api_url }}" + team_ids: "{{ grafana_auth__generic_oauth_team_ids }}" + allowed_organizations: "{{ grafana_auth__generic_oauth_allowed_organizations }}" + +# Authorization (basic). +grafana_auth__basic_enabled: false + +grafana_auth__basic: + enabled: "{{ grafana_auth__basic_enabled }}" + +# Authorization (proxy). 
+grafana_auth__proxy_enabled: false
+grafana_auth__proxy_header_name: "X-Forwarded-User"
+grafana_auth__proxy_header_property: "username"
+grafana_auth__proxy_auto_sign_up: true
+grafana_auth__proxy_ldap_sync_ttl: "60"
+grafana_auth__proxy_whitelist: ""
+
+grafana_auth__proxy:
+  enabled: "{{ grafana_auth__proxy_enabled }}"
+  header_name: "{{ grafana_auth__proxy_header_name }}"
+  header_property: "{{ grafana_auth__proxy_header_property }}"
+  auto_sign_up: "{{ grafana_auth__proxy_auto_sign_up }}"
+  ldap_sync_ttl: "{{ grafana_auth__proxy_ldap_sync_ttl }}"
+  whitelist: "{{ grafana_auth__proxy_whitelist }}"
+
+# Authorization (LDAP).
+grafana_auth__ldap_enabled: false
+grafana_auth__ldap_config_file: "/etc/grafana/ldap.toml"
+grafana_auth__ldap_auto_sign_up: true
+
+grafana_auth__ldap:
+  enabled: "{{ grafana_auth__ldap_enabled }}"
+  config_file: "{{ grafana_auth__ldap_config_file }}"
+  auto_sign_up: "{{ grafana_auth__ldap_auto_sign_up }}"
+
+# SMTP. These mirror Grafana's stock SMTP defaults; override as needed.
+grafana_smtp_enabled: false
+grafana_smtp_host: "localhost:25"
+grafana_smtp_user: ""
+grafana_smtp_password: ""
+grafana_smtp_cert_file: ""
+grafana_smtp_key_file: ""
+grafana_smtp_skip_verify: false
+grafana_smtp_from_address: "admin@grafana.localhost"
+
+grafana_smtp:
+  enabled: "{{ grafana_smtp_enabled }}"
+  host: "{{ grafana_smtp_host }}"
+  user: "{{ grafana_smtp_user }}"
+  password: "{{ grafana_smtp_password }}"
+  cert_file: "{{ grafana_smtp_cert_file }}"
+  key_file: "{{ grafana_smtp_key_file }}"
+  skip_verify: "{{ grafana_smtp_skip_verify }}"
+  from_address: "{{ grafana_smtp_from_address }}"
+
+# E-mails.
+grafana_emails_welcome_email_on_sign_up: false
+grafana_emails_templates_pattern: "emails/*.html"
+
+grafana_emails:
+  welcome_email_on_sign_up: "{{ grafana_emails_welcome_email_on_sign_up }}"
+  templates_pattern: "{{ grafana_emails_templates_pattern }}"
+
+# Logging.
+grafana_log_mode: "console file"
+grafana_log_level: "info"
+grafana_log_filters: ""
+
+grafana_log:
+  mode: "{{ grafana_log_mode }}"
+  level: "{{ grafana_log_level }}"
+  filters: "{{ grafana_log_filters }}"
+
+# Logging (console).
+grafana_log__console_level: ""
+grafana_log__console_format: "console"
+
+grafana_log__console:
+  level: "{{ grafana_log__console_level }}"
+  format: "{{ grafana_log__console_format }}"
+
+# Logging (file).
+grafana_log__file_level: ""
+grafana_log__file_format: "text"
+grafana_log__file_log_rotate: true
+grafana_log__file_max_lines: 1000000
+grafana_log__file_max_size_shift: 28
+grafana_log__file_daily_rotate: true
+grafana_log__file_max_days: 7
+
+grafana_log__file:
+  level: "{{ grafana_log__file_level }}"
+  format: "{{ grafana_log__file_format }}"
+  log_rotate: "{{ grafana_log__file_log_rotate }}"
+  max_lines: "{{ grafana_log__file_max_lines }}"
+  max_size_shift: "{{ grafana_log__file_max_size_shift }}"
+  daily_rotate: "{{ grafana_log__file_daily_rotate }}"
+  max_days: "{{ grafana_log__file_max_days }}"
+
+# Logging (syslog).
+grafana_log__syslog_level: ""
+grafana_log__syslog_format: "text"
+grafana_log__syslog_network: ""
+grafana_log__syslog_address: ""
+grafana_log__syslog_facility: ""
+grafana_log__syslog_tag: ""
+
+grafana_log__syslog:
+  level: "{{ grafana_log__syslog_level }}"
+  format: "{{ grafana_log__syslog_format }}"
+  network: "{{ grafana_log__syslog_network }}"
+  address: "{{ grafana_log__syslog_address }}"
+  facility: "{{ grafana_log__syslog_facility }}"
+  tag: "{{ grafana_log__syslog_tag }}"
+
+# Event publishing.
+grafana_event_publisher_enabled: false +grafana_event_publisher_rabbitmq_url: "amqp://localhost/" +grafana_event_publisher_exchange: "grafana_events" + +grafana_event_publisher: + enabled: "{{ grafana_event_publisher_enabled }}" + rabbitmq_url: "{{ grafana_event_publisher_rabbitmq_url }}" + exchange: "{{ grafana_event_publisher_exchange }}" + +# dashboards.json +grafana_dashboards_json_enabled: false +grafana_dashboards_json_path: "/var/lib/grafana/dashboards" + +grafana_dashboards_json: + enabled: "{{ grafana_dashboards_json_enabled }}" + path: "{{ grafana_dashboards_json_path }}" + +# Quotas. +grafana_quota_enabled: false +grafana_quota_org_user: 10 +grafana_quota_org_dashboard: 100 +grafana_quota_org_data_source: 10 +grafana_quota_org_api_key: 10 +grafana_quota_user_org: 10 +grafana_quota_global_user: -1 +grafana_quota_global_org: -1 +grafana_quota_global_dashboard: -1 +grafana_quota_global_api_key: -1 +grafana_quota_global_session: -1 + +grafana_quota: + enabled: "{{ grafana_quota_enabled }}" + org_user: "{{ grafana_quota_org_user }}" + org_dashboard: "{{ grafana_quota_org_dashboard }}" + org_data_source: "{{ grafana_quota_org_data_source }}" + org_api_key: "{{ grafana_quota_org_api_key }}" + user_org: "{{ grafana_quota_user_org }}" + global_user: "{{ grafana_quota_global_user }}" + global_org: "{{ grafana_quota_global_org }}" + global_dashboard: "{{grafana_quota_global_dashboard }}" + global_api_key: "{{ grafana_quota_global_api_key }}" + global_session: "{{ grafana_quota_global_session }}" + +# Alerting. +grafana_alerting_execute_alerts: true + +grafana_alerting: + execute_alerts: "{{ grafana_alerting_execute_alerts }}" + +# Metrics. +grafana_metrics_enabled: true +grafana_metrics_interval_seconds: 10 + +grafana_metrics: + enabled: "{{ grafana_metrics_enabled }}" + interval_seconds: "{{ grafana_metrics_interval_seconds }}" + +# Metrics (Graphite). +grafana_metrics__graphite_address: "" +grafana_metrics__graphite_prefix: "prod.grafana.%(instance_name)s." + +grafana_metrics__graphite: + address: "{{ grafana_metrics__graphite_address }}" + prefix: "{{ grafana_metrics__graphite_prefix }}" + +# Grafananet. +grafana_grafana_net_url: "https://grafana.net/" + +grafana_grafana_net: + url: "{{ grafana_grafana_net_url }}" + +# External image storage. +grafana_external_image_storage_provider: "" + +grafana_external_image_storage: + provider: "{{ grafana_external_image_storage_provider }}" + +# External image storage (S3). +grafana_external_image_storage__s3_bucket_url: "" +grafana_external_image_storage__s3_access_key: "" +grafana_external_image_storage__s3_secret_key: "" + +grafana_external_image_storage__s3: + bucket_url: "{{ grafana_external_image_storage__s3_bucket_url }}" + access_key: "{{ grafana_external_image_storage__s3_access_key }}" + secret_key: "{{ grafana_external_image_storage__s3_secret_key }}" + +# External image storage (WebDAV). +grafana_external_image_storage__webdav_url: "" +grafana_external_image_storage__webdav_username: "" +grafana_external_image_storage__webdav_password: "" + +grafana_external_image_storage__webdav: + url: "{{ grafana_external_image_storage__webdav_url }}" + username: "{{ grafana_external_image_storage__webdav_username }}" + password: "{{ grafana_external_image_storage__webdav_password }}" + +# Overall configuration. 
+grafana_config:
+  app_mode: "{{ grafana_app_mode }}"
+  paths: "{{ grafana_paths }}"
+  server: "{{ grafana_server }}"
+  database: "{{ grafana_database }}"
+  session: "{{ grafana_session }}"
+  analytics: "{{ grafana_analytics }}"
+  security: "{{ grafana_security }}"
+  snapshots: "{{ grafana_snapshots }}"
+  users: "{{ grafana_users }}"
+  # wire in the top-level [auth] section defined above
+  auth: "{{ grafana_auth }}"
+  auth.anonymous: "{{ grafana_auth__anonymous }}"
+  auth.github: "{{ grafana_auth__github }}"
+  auth.google: "{{ grafana_auth__google }}"
+  auth.grafananet: "{{ grafana_auth__grafananet }}"
+  auth.generic_oauth: "{{ grafana_auth__generic_oauth }}"
+  auth.basic: "{{ grafana_auth__basic }}"
+  auth.proxy: "{{ grafana_auth__proxy }}"
+  auth.ldap: "{{ grafana_auth__ldap }}"
+  smtp: "{{ grafana_smtp }}"
+  emails: "{{ grafana_emails }}"
+  log: "{{ grafana_log }}"
+  log.console: "{{ grafana_log__console }}"
+  log.file: "{{ grafana_log__file }}"
+  log.syslog: "{{ grafana_log__syslog }}"
+  event_publisher: "{{ grafana_event_publisher }}"
+  dashboards.json: "{{ grafana_dashboards_json }}"
+  quota: "{{ grafana_quota }}"
+  alerting: "{{ grafana_alerting }}"
+  metrics: "{{ grafana_metrics }}"
+  metrics.graphite: "{{ grafana_metrics__graphite }}"
+  grafana_net: "{{ grafana_grafana_net }}"
+  external_image_storage: "{{ grafana_external_image_storage }}"
+  external_image_storage.s3: "{{ grafana_external_image_storage__s3 }}"
+  external_image_storage.webdav: "{{ grafana_external_image_storage__webdav }}"
diff --git a/playbooks/roles/grafana/meta/main.yml b/playbooks/roles/grafana/meta/main.yml
new file mode 100644
index 00000000000..ebf5eaf33f9
--- /dev/null
+++ b/playbooks/roles/grafana/meta/main.yml
@@ -0,0 +1,6 @@
+---
+dependencies:
+  - role: common
+    tags:
+      - always
+  - config-encoders
diff --git a/playbooks/roles/grafana/tasks/main.yml b/playbooks/roles/grafana/tasks/main.yml
new file mode 100644
index 00000000000..e45f2c6253a
--- /dev/null
+++ b/playbooks/roles/grafana/tasks/main.yml
@@ -0,0 +1,84 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+# Tasks for role grafana
+#
+# Overview:
+#
+# This role installs Grafana, a UI for creating visualizations on top of
+# common metrics data sources such as Graphite, InfluxDB, CloudWatch, and more.
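+#
+# Example play (illustrative only; GRAFANA_NGINX_PORT and
+# grafana_security_admin_password come from this role's defaults, while
+# vault_grafana_admin_password is a hypothetical secret defined elsewhere):
+#
+# - hosts: grafana
+#   become: True
+#   gather_facts: True
+#   roles:
+#     - role: grafana
+#       GRAFANA_NGINX_PORT: 80
+#       grafana_security_admin_password: "{{ vault_grafana_admin_password }}"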
+#
+# Dependencies:
+#   - common
+#   - config-encoders
+#

+- name: ensure correct OS
+  fail:
+    msg: "this playbook can only be run on an Ubuntu host"
+  when: ansible_distribution != "Ubuntu"

+- name: install packages.grafana.com GPG key
+  apt_key:
+    url: "https://packages.grafana.com/gpg.key"
+    state: present
+  tags:
+    - install
+    - install:system-requirements

+- name: install packages.grafana.com apt repository
+  apt_repository:
+    # PackageCloud is no longer used, so install from the official
+    # packages.grafana.com repository instead.
+    repo: "deb https://packages.grafana.com/oss/deb stable main"
+    update_cache: yes
+    state: present
+  tags:
+    - install
+    - install:system-requirements

+- name: install grafana
+  apt:
+    name: "{{ grafana_apt_pkg }}"
+    state: present
+  tags:
+    - install
+    - install:system-requirements

+- name: configure grafana
+  template:
+    src: "conf/grafana.ini.j2"
+    dest: "/etc/grafana/grafana.ini"
+  tags:
+    - install
+    - install:app-configuration

+- name: enable grafana
+  service:
+    name: grafana-server
+    enabled: yes
+    state: started
+  tags:
+    - install
+    - install:configuration

+- name: stop grafana
+  service:
+    name: grafana-server
+    state: stopped
+  tags:
+    - manage:stop

+- name: restart grafana
+  service:
+    name: grafana-server
+    state: restarted
+  tags:
+    - manage:start
+    - manage:restart
diff --git a/playbooks/roles/grafana/templates/conf/grafana.ini.j2 b/playbooks/roles/grafana/templates/conf/grafana.ini.j2
new file mode 100644
index 00000000000..e665f591798
--- /dev/null
+++ b/playbooks/roles/grafana/templates/conf/grafana.ini.j2
@@ -0,0 +1 @@
+{{ grafana_config | encode_ini }}
diff --git a/playbooks/roles/graphite/LICENSE b/playbooks/roles/graphite/LICENSE
new file mode 100644
index 00000000000..0dba6521409
--- /dev/null
+++ b/playbooks/roles/graphite/LICENSE
@@ -0,0 +1,206 @@
+Originally authored by Hector Castro (https://github.com/azavea/ansible-graphite)
+
+-------------------------------------------------------------------------------
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/playbooks/roles/graphite/defaults/main.yml b/playbooks/roles/graphite/defaults/main.yml new file mode 100644 index 00000000000..3c0b651147f --- /dev/null +++ b/playbooks/roles/graphite/defaults/main.yml @@ -0,0 +1,137 @@ +--- +GRAPHITE_CARBON_GIT_URL: "https://github.com/graphite-project/carbon.git" +GRAPHITE_WHISPER_GIT_URL: "https://github.com/graphite-project/whisper.git" +GRAPHITE_API_GIT_URL: "https://github.com/brutasse/graphite-api.git" + +GRAPHITE_CARBON_VERSION: "816ac631efae33c7b47ecbe79ca9e1f907e5efe8" +GRAPHITE_WHISPER_VERSION: "0.9.15" +GRAPHITE_API_VERSION: "1.1.3" + +graphite_root: "/opt/graphite" +graphite_storage_root: "/var/opt/graphite" +graphite_common_dirs: + root: + path: "{{ graphite_root }}" + owner: "{{ graphite_user }}" + group: "{{ graphite_group }}" + mode: "0755" + src: + path: "{{ graphite_root }}/src" + owner: "{{ graphite_user }}" + group: "{{ graphite_group }}" + mode: "0755" + conf: + path: "{{ graphite_root }}/conf" + owner: "{{ graphite_user }}" + group: "{{ graphite_group }}" + mode: "0755" + run: + path: "{{ graphite_root }}/run" + owner: "{{ graphite_user }}" + group: "{{ graphite_group }}" + mode: "0775" + storage: + path: "{{ graphite_storage_root }}" + owner: "{{ graphite_user }}" + group: "{{ graphite_group }}" + mode: "0775" + log: + path: "/var/log/carbon" + owner: "{{ carbon_user }}" + group: "{{ graphite_group }}" + mode: "0755" + +graphite_common_deb_deps: + - libcairo2-dev + - libffi-dev + - pkg-config + - fontconfig + +carbon_user: "carbon" +graphite_user: "graphite" +graphite_group: "graphite" + +GRAPHITE_NGINX_PORT: 6000 +GRAPHITE_GUNICORN_WORKERS: 3 + +carbon_listen_addr: 0.0.0.0 + +carbon_storage_dir: "{{ graphite_storage_root }}" +carbon_cache_write_strategy: max +carbon_max_cache_size: inf +carbon_use_flow_control: True +carbon_whisper_fallocate_create: True +carbon_max_creates_per_minute: 100 +carbon_max_updates_per_second: 1000 +carbon_line_receiver_interface: "{{ carbon_listen_addr }}" +carbon_pickle_receiver_interface: "{{ carbon_listen_addr }}" +carbon_cache_query_interface: "{{ carbon_listen_addr }}" +carbon_use_insecure_unpickler: False +carbon_log_cache_hits: False +carbon_log_cache_queue_sorts: True +carbon_log_listener_connections: True +carbon_log_updates: False +carbon_enable_log_rotation: True +carbon_whisper_autoflush: False +carbon_default_line_receiver_port: 2003 +carbon_default_pickle_receiver_port: 2004 +carbon_default_cache_query_port: 7002 + +CARBON_CONF: + cache: + USER: "{{ carbon_user }}" + STORAGE_DIR: "{{ graphite_storage_root }}" + CACHE_WRITE_STRATEGY: "{{ carbon_cache_write_strategy }}" + MAX_CACHE_SIZE: "{{ carbon_max_cache_size }}" + USE_FLOW_CONTROL: "{{ carbon_use_flow_control }}" + WHISPER_FALLOCATE_CREATE: "{{ carbon_whisper_fallocate_create }}" + MAX_CREATES_PER_MINUTE: "{{ carbon_max_creates_per_minute }}" + MAX_UPDATES_PER_SECOND: "{{ carbon_max_updates_per_second }}" + LINE_RECEIVER_INTERFACE: "{{ carbon_listen_addr }}" + PICKLE_RECEIVER_INTERFACE: "{{ carbon_listen_addr }}" + USE_INSECURE_UNPICKLER: "{{ carbon_use_insecure_unpickler }}" + CACHE_QUERY_INTERFACE: "{{ carbon_listen_addr }}" + LOG_CACHE_HITS: "{{ carbon_log_cache_hits }}" + LOG_CACHE_QUEUE_SORTS: "{{ carbon_log_cache_queue_sorts }}" + LOG_LISTENER_CONNECTIONS: "{{ carbon_log_listener_connections }}" + LOG_UPDATES: "{{ carbon_log_updates }}" + ENABLE_LOGROTATION: "{{ carbon_enable_log_rotation }}" + WHISPER_AUTOFLUSH: "{{ carbon_whisper_autoflush }}" + + "cache:1": + LINE_RECEIVER_PORT: "{{ 
carbon_default_line_receiver_port }}"
+    PICKLE_RECEIVER_PORT: "{{ carbon_default_pickle_receiver_port }}"
+    CACHE_QUERY_PORT: "{{ carbon_default_cache_query_port }}"
+
+CARBON_STORAGE_SCHEMAS:
+  carbon:
+    description: "Catches all of Carbon's internal metrics"
+    pattern: "carbon.*"
+    retentions: "10s:1y"
+  default:
+    pattern: ".*"
+    retentions: "60s:1y"
+
+CARBON_STORAGE_AGGREGATIONS:
+  min:
+    pattern: "\\.min$"
+    xFilesFactor: "0.1"
+    aggregationMethod: "min"
+  max:
+    pattern: "\\.max$"
+    xFilesFactor: "0.1"
+    aggregationMethod: "max"
+  sum:
+    pattern: "\\.count$"
+    xFilesFactor: "0"
+    aggregationMethod: "sum"
+  default_average:
+    pattern: ".*"
+    xFilesFactor: "0.5"
+    aggregationMethod: "average"
+
+GRAPHITE_API_CONF:
+  search_index: "{{ graphite_root }}/api/index"
+  whisper:
+    directories:
+      - "{{ graphite_storage_root }}/whisper"
diff --git a/playbooks/roles/graphite/meta/main.yml b/playbooks/roles/graphite/meta/main.yml
new file mode 100644
index 00000000000..ebf5eaf33f9
--- /dev/null
+++ b/playbooks/roles/graphite/meta/main.yml
@@ -0,0 +1,6 @@
+---
+dependencies:
+  - role: common
+    tags:
+      - always
+  - config-encoders
diff --git a/playbooks/roles/graphite/tasks/carbon.yml b/playbooks/roles/graphite/tasks/carbon.yml
new file mode 100644
index 00000000000..912d4237d36
--- /dev/null
+++ b/playbooks/roles/graphite/tasks/carbon.yml
@@ -0,0 +1,76 @@
+---
+- name: checkout carbon
+  git:
+    repo: "{{ GRAPHITE_CARBON_GIT_URL }}"
+    dest: "{{ graphite_root }}/src/carbon"
+    version: "{{ GRAPHITE_CARBON_VERSION }}"
+  tags:
+    - install
+    - install:code
+
+- name: install carbon dependencies
+  pip:
+    virtualenv: "{{ graphite_root }}"
+    requirements: "{{ graphite_root }}/src/carbon/requirements.txt"
+  tags:
+    - install
+    - install:app-requirements
+
+- name: install carbon
+  command: "{{ graphite_root }}/bin/python setup.py install"
+  args:
+    chdir: "{{ graphite_root }}/src/carbon"
+    creates: "{{ graphite_root }}/bin/carbon-cache.py"
+  tags:
+    - install
+    - install:app-requirements
+
+- name: configure carbon
+  template:
+    src: "carbon/conf/{{ item }}.conf.j2"
+    dest: "{{ graphite_root }}/conf/{{ item }}.conf"
+  with_items:
+    - carbon
+    - storage-schemas
+    - storage-aggregation
+  tags:
+    - install
+    - install:app-configuration
+
+- name: configure carbon service definition
+  template:
+    src: carbon/systemd/carbon-cache.service.j2
+    dest: /etc/systemd/system/carbon-cache.service
+  tags:
+    - install
+    - install:configuration
+
+- name: reload systemd configuration
+  command: systemctl daemon-reload
+  tags:
+    - install
+    - install:configuration
+
+- name: enable carbon
+  service:
+    name: carbon-cache
+    enabled: yes
+    state: started
+  tags:
+    - install
+    - install:configuration
+
+- name: stop carbon
+  service:
+    name: carbon-cache
+    state: stopped
+  tags:
+    - manage:stop
+
+- name: restart carbon
+  service:
+    name: carbon-cache
+    state: restarted
+  tags:
+    - manage:start
+    - manage:restart
diff --git a/playbooks/roles/graphite/tasks/graphite-api.yml b/playbooks/roles/graphite/tasks/graphite-api.yml
new file mode 100644
index 00000000000..6c3ef629246
--- /dev/null
+++ b/playbooks/roles/graphite/tasks/graphite-api.yml
@@ -0,0 +1,83 @@
+---
+- name: checkout graphite-api
+  git:
+    repo: "{{ GRAPHITE_API_GIT_URL }}"
+    dest: "{{ graphite_root }}/src/api"
+    version: "{{ GRAPHITE_API_VERSION }}"
+  tags:
+    - install
+    - install:code
+
+- name: install graphite-api dependencies
+  pip:
+    virtualenv: "{{ graphite_root }}"
+    requirements: "{{ graphite_root }}/src/api/requirements.txt"
+  tags:
+    - install
+    -
install:app-requirements + +- name: install graphite-api + command: "{{ graphite_root }}/bin/python setup.py install" + args: + chdir: "{{ graphite_root }}/src/api" + tags: + - install + - install:app-requirements + +- name: install gunicorn + pip: + virtualenv: "{{ graphite_root }}" + name: gunicorn + tags: + - install + - install:app-requirements + +- name: configure graphite-api + template: + src: graphite-api/conf/graphite-api.yml.j2 + dest: "{{ graphite_root }}/conf/graphite-api.yml" + owner: "{{ graphite_user }}" + group: "{{ graphite_group }}" + tags: + - install + - install:app-configuration + +- name: configure graphite-api service definitions + template: + src: "graphite-api/systemd/{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + with_items: + - graphite-api.socket + - graphite-api.service + tags: + - install + - install:configuration + +- name: reload systemd configuration + command: systemctl daemon-reload + tags: + - install + - install:configuration + +- name: enable graphite-api + service: + name: "{{ item }}" + enabled: yes + with_items: + - graphite-api.socket + - graphite-api.service + +- name: stop graphite-api + service: + name: graphite-api + state: stopped + tags: + - manage:stop + +- name: restart graphite-api + service: + name: graphite-api + state: restarted + tags: + - manage:start + - manage:restart diff --git a/playbooks/roles/graphite/tasks/main.yml b/playbooks/roles/graphite/tasks/main.yml new file mode 100644 index 00000000000..2635768317b --- /dev/null +++ b/playbooks/roles/graphite/tasks/main.yml @@ -0,0 +1,90 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Tasks for role graphite +# +# Overview: +# +# This task is to install "Graphite", or more specifically, +# its constituent parts: carbon and graphite-api. +# +# For our needs at edX, we do not need the full-fledged +# graphite-web, instead opting to use the light graphite-api, +# which can be pointed to by other tools such as Grafana. 
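+#
+# Illustrative override (not set anywhere by default): the retention schemas
+# rendered into storage-schemas.conf can be replaced wholesale by defining
+# CARBON_STORAGE_SCHEMAS_OVERRIDE, which the template prefers over the
+# CARBON_STORAGE_SCHEMAS default, e.g.:
+#
+# CARBON_STORAGE_SCHEMAS_OVERRIDE:
+#   default:
+#     pattern: ".*"
+#     retentions: "10s:1d,60s:1y"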
+# +# Dependencies: +# - common +# - config-encoders +# + +- name: create Graphite user group + group: + name: "{{ graphite_group }}" + state: present + tags: + - install + - install:base + +- name: create service account for Graphite + user: + name: "{{ graphite_user }}" + system: yes + home: "{{ graphite_root }}" + shell: /bin/false + group: "{{ graphite_group }}" + state: present + tags: + - install + - install:base + +- name: create service account for Carbon + user: + name: "{{ carbon_user }}" + system: yes + home: "{{ graphite_root }}" + shell: /bin/false + group: "{{ graphite_group }}" + state: present + tags: + - install + - install:base + +- name: create common Graphite directories + file: + path: "{{ item.value.path }}" + state: directory + owner: "{{ item.value.owner }}" + group: "{{ item.value.group }}" + mode: "{{ item.value.mode }}" + with_dict: "{{ graphite_common_dirs }}" + tags: + - install + - install:base + +- name: install Graphite dependencies + apt: + pkg: "{{ item }}" + state: present + with_items: "{{ graphite_common_deb_deps }}" + tags: + - install + - install:system-requirements + +- name: initialize virtualenv for Graphite + command: "virtualenv {{ graphite_root }}" + args: + creates: "{{ graphite_root }}/bin/activate" + tags: + - install + - install:base + +- include: whisper.yml +- include: carbon.yml +- include: graphite-api.yml diff --git a/playbooks/roles/graphite/tasks/whisper.yml b/playbooks/roles/graphite/tasks/whisper.yml new file mode 100644 index 00000000000..0caae8246f2 --- /dev/null +++ b/playbooks/roles/graphite/tasks/whisper.yml @@ -0,0 +1,18 @@ +--- +- name: checkout Whisper + git: + repo: "{{ GRAPHITE_WHISPER_GIT_URL }}" + dest: "{{ graphite_root }}/src/whisper" + version: "{{ GRAPHITE_WHISPER_VERSION }}" + tags: + - install + - install:code + +- name: install Whisper + command: "{{ graphite_root }}/bin/python setup.py install" + args: + chdir: "{{ graphite_root }}/src/whisper" + creates: "{{ graphite_root }}/bin/whisper-create.py" + tags: + - install + - install:app-requirements diff --git a/playbooks/roles/graphite/templates/carbon/conf/carbon.conf.j2 b/playbooks/roles/graphite/templates/carbon/conf/carbon.conf.j2 new file mode 100644 index 00000000000..5ffdde20c9c --- /dev/null +++ b/playbooks/roles/graphite/templates/carbon/conf/carbon.conf.j2 @@ -0,0 +1 @@ +{{ CARBON_CONF | encode_ini }} diff --git a/playbooks/roles/graphite/templates/carbon/conf/storage-aggregation.conf.j2 b/playbooks/roles/graphite/templates/carbon/conf/storage-aggregation.conf.j2 new file mode 100644 index 00000000000..ca8339b07f1 --- /dev/null +++ b/playbooks/roles/graphite/templates/carbon/conf/storage-aggregation.conf.j2 @@ -0,0 +1,14 @@ +# Aggregation methods for whisper files. Entries are scanned in order, +# and first match wins. 
This file is scanned for changes every 60 seconds. +# +# [name] +# pattern = +# xFilesFactor = +# aggregationMethod = +# +# name: Arbitrary unique name for the rule +# pattern: Regex pattern to match against the metric name +# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur +# aggregationMethod: Function to apply to data points for aggregation + +{{ CARBON_STORAGE_AGGREGATIONS_OVERRIDE | default(CARBON_STORAGE_AGGREGATIONS) | encode_ini }} diff --git a/playbooks/roles/graphite/templates/carbon/conf/storage-schemas.conf.j2 b/playbooks/roles/graphite/templates/carbon/conf/storage-schemas.conf.j2 new file mode 100644 index 00000000000..c27d39e2126 --- /dev/null +++ b/playbooks/roles/graphite/templates/carbon/conf/storage-schemas.conf.j2 @@ -0,0 +1,4 @@ +# Schema definitions for Whisper files. Entries are scanned in order, +# and first match wins. This file is scanned for changes every 60 seconds. + +{{ CARBON_STORAGE_SCHEMAS_OVERRIDE | default(CARBON_STORAGE_SCHEMAS) | encode_ini }} diff --git a/playbooks/roles/graphite/templates/carbon/systemd/carbon-cache.service.j2 b/playbooks/roles/graphite/templates/carbon/systemd/carbon-cache.service.j2 new file mode 100644 index 00000000000..2f9296a497d --- /dev/null +++ b/playbooks/roles/graphite/templates/carbon/systemd/carbon-cache.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=carbon-cache +After=network.target + +[Service] +Type=forking +StandardOutput=syslog +StandardError=syslog +User={{ carbon_user }} +Group={{ graphite_group }} +ExecStart={{ graphite_root }}/bin/carbon-cache.py --config={{ graphite_root }}/conf/carbon.conf --pidfile={{ graphite_root }}/run/carbon-cache.pid --logdir=/var/log/carbon/ start +ExecReload=/bin/kill -USR1 $MAINPID +ExecStop={{ graphite_root }}/bin/carbon-cache.py --pidfile={{ graphite_root }}/run/carbon-cache.pid stop +PIDFile={{ graphite_root }}/run/carbon-cache.pid +PrivateTmp=true +LimitNOFILE=128000 + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/graphite/templates/graphite-api/conf/graphite-api.yml.j2 b/playbooks/roles/graphite/templates/graphite-api/conf/graphite-api.yml.j2 new file mode 100644 index 00000000000..e892264f0b6 --- /dev/null +++ b/playbooks/roles/graphite/templates/graphite-api/conf/graphite-api.yml.j2 @@ -0,0 +1 @@ +{{ GRAPHITE_API_CONF | encode_yaml }} diff --git a/playbooks/roles/graphite/templates/graphite-api/systemd/graphite-api.service.j2 b/playbooks/roles/graphite/templates/graphite-api/systemd/graphite-api.service.j2 new file mode 100644 index 00000000000..58fb687e147 --- /dev/null +++ b/playbooks/roles/graphite/templates/graphite-api/systemd/graphite-api.service.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=graphite-api service +After=network.target +Requires=graphite-api.socket + +[Service] +StandardOutput=syslog +StandardError=syslog +User={{ graphite_user }} +Group={{ graphite_group }} +WorkingDirectory={{ graphite_root }} +Environment=GRAPHITE_API_CONFIG={{ graphite_root }}/conf/graphite-api.yml +ExecStart={{ graphite_root }}/bin/gunicorn -w {{ GRAPHITE_GUNICORN_WORKERS }} graphite_api.app:app +Restart=on-failure +ExecReload=/bin/kill -s HUP $MAINPID +ExecStop=/bin/kill -s TERM $MAINPID +PrivateTmp=true +LimitNOFILE=128000 + +[Install] +WantedBy=multi-user.target
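+ +# Note: gunicorn is handed its listening socket by graphite-api.socket via +# systemd socket activation, which is why no --bind address appears above.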
diff --git a/playbooks/roles/graphite/templates/graphite-api/systemd/graphite-api.socket.j2 b/playbooks/roles/graphite/templates/graphite-api/systemd/graphite-api.socket.j2 new file mode 100644 index 00000000000..8c0c264e8fe --- /dev/null +++ b/playbooks/roles/graphite/templates/graphite-api/systemd/graphite-api.socket.j2 @@ -0,0 +1,10 @@ +[Unit] +Description=graphite-api socket + +[Socket] +SocketUser={{ graphite_user }} +SocketGroup={{ graphite_group }} +ListenStream={{ graphite_root }}/run/graphite-api.sock + +[Install] +WantedBy=sockets.target diff --git a/playbooks/roles/hadoop_common/defaults/main.yml b/playbooks/roles/hadoop_common/defaults/main.yml new file mode 100644 index 00000000000..45d332f7a1f --- /dev/null +++ b/playbooks/roles/hadoop_common/defaults/main.yml @@ -0,0 +1,132 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role hadoop_common +# + +HADOOP_COMMON_VERSION: 2.7.2 +HADOOP_COMMON_USER_HOME: "{{ COMMON_APP_DIR }}/hadoop" +HADOOP_COMMON_HOME: "{{ HADOOP_COMMON_USER_HOME }}/hadoop" +HADOOP_COMMON_DATA: "{{ COMMON_DATA_DIR }}/hadoop" + +# These are non-standard directories, but they are where Hadoop expects to find them. +HADOOP_COMMON_LOGS: "{{ HADOOP_COMMON_HOME }}/logs" +HADOOP_COMMON_CONF_DIR: "{{ HADOOP_COMMON_HOME }}/etc/hadoop" + +HADOOP_COMMON_PROTOBUF_VERSION: 2.5.0 +HADOOP_COMMON_SERVICES_DIR: "{{ HADOOP_COMMON_USER_HOME }}/services.d" +HADOOP_COMMON_SERVICE_HEAP_MAX: 256 +HADOOP_COMMON_TOOL_HEAP_MAX: 128 + +hadoop_common_role_name: hadoop_common +hadoop_common_user: hadoop +hadoop_common_group: hadoop +hadoop_common_temporary_dir: /var/tmp +hadoop_common_dist: + filename: "hadoop-{{ HADOOP_COMMON_VERSION }}.tar.gz" + url: "https://archive.apache.org/dist/hadoop/common/hadoop-%7B%7B%20HADOOP_COMMON_VERSION%20%7D%7D/hadoop-%7B%7B%20HADOOP_COMMON_VERSION%20%7D%7D.tar.gz" + sha256sum: 49AD740F85D27FA39E744EB9E3B1D9442AE63D62720F0AABDAE7AA9A718B03F7 + +hadoop_common_protobuf_dist: + filename: "protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}.tar.gz" + url: "https://github.com/google/protobuf/releases/download/v%7B%7B%20HADOOP_COMMON_PROTOBUF_VERSION%20%7D%7D/protobuf-%7B%7B%20HADOOP_COMMON_PROTOBUF_VERSION%20%7D%7D.tar.gz" + sha256sum: c55aa3dc538e6fd5eaf732f4eb6b98bdcb7cedb5b91d3b5bdcf29c98c293f58e + +hadoop_common_native_dist: + filename: "hadoop-{{ HADOOP_COMMON_VERSION }}-src.tar.gz" + url: "https://archive.apache.org/dist/hadoop/common/hadoop-%7B%7B%20HADOOP_COMMON_VERSION%20%7D%7D/hadoop-%7B%7B%20HADOOP_COMMON_VERSION%20%7D%7D-src.tar.gz" + sha256sum: 7d48e61b5464a76543fecf5655d06215cf8674d248b28bc74f613bc8aa047d33 + +hadoop_common_java_home: "{{ oraclejdk_link }}" +hadoop_common_env: "{{ HADOOP_COMMON_HOME }}/hadoop_env" + +# +# OS packages +# +hadoop_common_debian_pkgs: + - gcc + - build-essential + - make + - cmake + - automake + - autoconf + - libtool + - zlib1g-dev + - maven + +hadoop_common_redhat_pkgs: [] + +# +# Vars are used to fill in the following files: +# core-site.xml +# hdfs-site.xml +# mapred-site.xml +# yarn-site.xml +# +MAPRED_SITE_DEFAULT_CONFIG: + mapreduce.framework.name: "yarn" + +YARN_SITE_DEFAULT_CONFIG: + yarn.nodemanager.aux-services: "mapreduce_shuffle" + yarn.nodemanager.aux-services.mapreduce.shuffle.class: "org.apache.hadoop.mapred.ShuffleHandler" + yarn.log-aggregation-enable: "true" + # 24-hour log retention + yarn.log-aggregation.retain-seconds: 86400 + # Checking virtual memory usage causes too many spurious failures. + yarn.nodemanager.vmem-check-enabled: false
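+ +# Each dict above is rendered into its matching *-site.xml as <property> +# entries; for illustration, the log-retention setting above becomes: +# +# <property> +#   <name>yarn.log-aggregation.retain-seconds</name> +#   <value>86400</value> +# </property>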
 + +HADOOP_CORE_SITE_DEFAULT_CONFIG: + fs.default.name: "hdfs://localhost:9000" + +HDFS_SITE_DEFAULT_CONFIG: + dfs.replication: "1" + dfs.namenode.name.dir: "file:{{ HADOOP_COMMON_DATA }}/namenode" + dfs.datanode.data.dir: "file:{{ HADOOP_COMMON_DATA }}/datanode" + +# +# MapReduce/Yarn memory config (defaults for m1.medium) +# http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/TaskConfiguration_H2.html +# +# mapred_site_config: +# mapreduce.map.memory.mb: 768 +# mapreduce.map.java.opts: '-Xmx512M' +# mapreduce.reduce.memory.mb: 1024 +# mapreduce.reduce.java.opts: '-Xmx768M' + +# yarn_site_config: +# yarn.app.mapreduce.am.resource.mb: 1024 +# yarn.scheduler.minimum-allocation-mb: 32 +# yarn.scheduler.maximum-allocation-mb: 2048 +# yarn.nodemanager.resource.memory-mb: 2048 +# yarn.nodemanager.vmem-pmem-ratio: 2.1 + +# +# These variables override entries in the stock configuration written to +# the following files. Use quoted string literals for the values so that +# YAML does not coerce them unexpectedly: +# core-site.xml +# hdfs-site.xml +# mapred-site.xml +# yarn-site.xml +# +mapred_site_config: {} +yarn_site_config: {} +HADOOP_CORE_SITE_EXTRA_CONFIG: {} +HDFS_SITE_EXTRA_CONFIG: {} + +# Define all the services here, assuming we have a single-node Hadoop +# cluster. +hadoop_common_services: + - hdfs-namenode + - hdfs-datanode + - yarn-resourcemanager + - yarn-nodemanager + - yarn-proxyserver + - mapreduce-historyserver diff --git a/playbooks/roles/hadoop_common/meta/main.yml b/playbooks/roles/hadoop_common/meta/main.yml new file mode 100644 index 00000000000..b634f7de7fd --- /dev/null +++ b/playbooks/roles/hadoop_common/meta/main.yml @@ -0,0 +1,14 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hadoop_common + +dependencies: + - oraclejdk \ No newline at end of file diff --git a/playbooks/roles/hadoop_common/tasks/main.yml b/playbooks/roles/hadoop_common/tasks/main.yml new file mode 100644 index 00000000000..c0ba9733f50 --- /dev/null +++ b/playbooks/roles/hadoop_common/tasks/main.yml @@ -0,0 +1,231 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hadoop_common +# +# Overview: +# +# This role installs all hadoop services onto the machine. Note that this should +# be used to configure all machines in a hadoop cluster. It does not perform +# any role-specific actions such as formatting the namenode etc. +# +# Dependencies: +# +# oraclejdk: Not strictly required, but we tend to trust it more than openjdk. +# + +- name: install system packages + apt: + pkg: "{{ item }}" + state: present + with_items: "{{ hadoop_common_debian_pkgs }}" + +# Update procps to get the fix for +# https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1637026, which +# causes hadoop jobs to fail on Xenial. 
+- name: update system packages + apt: + pkg: "{{ item }}" + state: latest + with_items: + - procps + +- name: ensure group exists + group: + name: "{{ hadoop_common_group }}" + system: yes + state: present + +- name: ensure user exists + user: + name: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + home: "{{ HADOOP_COMMON_USER_HOME }}" + createhome: yes + shell: /bin/bash + system: yes + generate_ssh_key: yes + state: present + +- name: own key authorized + file: + src: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub" + dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: link + +- name: ssh configured + template: + src: hadoop_user_ssh_config.j2 + dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/config" + mode: 0600 + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + +- name: ensure user is in sudoers + lineinfile: + dest: /etc/sudoers + state: present + regexp: '^%hadoop ALL\=' + line: '%hadoop ALL=(ALL) NOPASSWD:ALL' + validate: 'visudo -cf %s' + +- name: check if downloaded and extracted + stat: path={{ HADOOP_COMMON_HOME }} + register: extracted_hadoop_dir + +- name: distribution downloaded + get_url: + url: "{{ hadoop_common_dist.url }}" + sha256sum: "{{ hadoop_common_dist.sha256sum }}" + dest: "{{ hadoop_common_temporary_dir }}" + when: not extracted_hadoop_dir.stat.exists + +- name: distribution extracted + shell: "tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }}" + args: + chdir: "{{ HADOOP_COMMON_USER_HOME }}" + when: not extracted_hadoop_dir.stat.exists + +- name: versioned directory symlink created + file: + src: "{{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }}" + dest: "{{ HADOOP_COMMON_HOME }}" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: link + +- name: configuration installed + template: + src: "{{ item }}.j2" + dest: "{{ HADOOP_COMMON_CONF_DIR }}/{{ item }}" + mode: 0640 + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + with_items: + - hadoop-env.sh + - mapred-site.xml + - core-site.xml + - hdfs-site.xml + - yarn-site.xml + +- name: service definitions installed + template: + src: "etc/systemd/system/{{ item }}.service.j2" + dest: "/etc/systemd/system/{{ item }}.service" + mode: 0640 + owner: root + group: root + with_items: "{{ hadoop_common_services }}" + tags: + - install + - install:configuration + +- name: hadoop env file exists + file: + path: "{{ hadoop_common_env }}" + state: touch + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + +- name: env vars sourced in bashrc + lineinfile: + dest: "{{ HADOOP_COMMON_USER_HOME }}/.bashrc" + state: present + regexp: "^. {{ hadoop_common_env }}" + line: ". {{ hadoop_common_env }}" + insertbefore: BOF + +- name: env vars sourced in hadoop env + lineinfile: + dest: "{{ hadoop_common_env }}" + state: present + regexp: "^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh" + line: ". 
{{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh" + +- name: check if native libraries need to be built + stat: path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built + register: native_libs_built + +- name: protobuf downloaded + get_url: + url: "{{ hadoop_common_protobuf_dist.url }}" + sha256sum: "{{ hadoop_common_protobuf_dist.sha256sum }}" + dest: "{{ hadoop_common_temporary_dir }}" + when: not native_libs_built.stat.exists + +- name: protobuf extracted + shell: "tar -xzf {{ hadoop_common_protobuf_dist.filename }}" + args: + chdir: "{{ hadoop_common_temporary_dir }}" + when: not native_libs_built.stat.exists + +- name: protobuf installed + shell: "./configure --prefix=/usr/local && make && make install" + args: + chdir: "{{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}" + when: not native_libs_built.stat.exists + +- name: native lib source downloaded + get_url: + url: "{{ hadoop_common_native_dist.url }}" + sha256sum: "{{ hadoop_common_native_dist.sha256sum }}" + dest: "{{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }}" + when: not native_libs_built.stat.exists + +- name: native lib source extracted + shell: "tar -xzf {{ hadoop_common_native_dist.filename }}" + args: + chdir: "{{ hadoop_common_temporary_dir }}" + when: not native_libs_built.stat.exists + +- name: native lib built + shell: "mvn package -X -Pnative -DskipTests" + args: + chdir: "{{ hadoop_common_temporary_dir }}/hadoop-{{ HADOOP_COMMON_VERSION }}-src/hadoop-common-project" + environment: + LD_LIBRARY_PATH: /usr/local/lib + when: not native_libs_built.stat.exists + +- name: old native libs renamed + shell: "mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}" + with_items: + - { name: libhadoop.a, new_name: libhadoop32.a } + - { name: libhadoop.so, new_name: libhadoop32.so } + - { name: libhadoop.so.1.0.0, new_name: libhadoop32.so.1.0.0 } + when: not native_libs_built.stat.exists + +- name: new native libs installed + shell: "chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ item }} && cp {{ item }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item }}" + args: + chdir: "{{ hadoop_common_temporary_dir }}/hadoop-{{ HADOOP_COMMON_VERSION }}-src/hadoop-common-project/hadoop-common/target/native/target/usr/local/lib" + with_items: + - libhadoop.a + - libhadoop.so + - libhadoop.so.1.0.0 + when: not native_libs_built.stat.exists + +- name: native lib marker touched + file: + path: "{{ HADOOP_COMMON_USER_HOME }}/.native_libs_built" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: touch + when: not native_libs_built.stat.exists + +- name: service directory exists + file: + path: "{{ HADOOP_COMMON_SERVICES_DIR }}" + mode: "0750" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: directory diff --git a/playbooks/roles/hadoop_common/templates/core-site.xml.j2 b/playbooks/roles/hadoop_common/templates/core-site.xml.j2 new file mode 100644 index 00000000000..8d97d12e430 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/core-site.xml.j2 @@ -0,0 +1,12 @@ +{% do HADOOP_CORE_SITE_DEFAULT_CONFIG.update(HADOOP_CORE_SITE_EXTRA_CONFIG) %} + + + + +{% for key, value in HADOOP_CORE_SITE_DEFAULT_CONFIG.items() %} + + {{ key }} + {{ value }} + +{% endfor %} + diff --git a/playbooks/roles/hadoop_common/templates/etc/systemd/system/hdfs-datanode.service.j2 b/playbooks/roles/hadoop_common/templates/etc/systemd/system/hdfs-datanode.service.j2 new file mode 100644 index 
00000000000..fd6e289ddc0 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/etc/systemd/system/hdfs-datanode.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Hadoop Distributed File System Data Node + +[Service] +Type=forking +ExecStart={{ HADOOP_COMMON_HOME }}/sbin/hadoop-daemon.sh --script hdfs start datanode +ExecStop={{ HADOOP_COMMON_HOME }}/sbin/hadoop-daemon.sh --script hdfs stop datanode +User={{ hadoop_common_user }} +Group={{ hadoop_common_group }} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hadoop_common/templates/etc/systemd/system/hdfs-namenode.service.j2 b/playbooks/roles/hadoop_common/templates/etc/systemd/system/hdfs-namenode.service.j2 new file mode 100644 index 00000000000..eb8aa56cda1 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/etc/systemd/system/hdfs-namenode.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Hadoop Distributed File System Name Node + +[Service] +Type=forking +ExecStart={{ HADOOP_COMMON_HOME }}/sbin/hadoop-daemon.sh --script hdfs start namenode +ExecStop={{ HADOOP_COMMON_HOME }}/sbin/hadoop-daemon.sh --script hdfs stop namenode +User={{ hadoop_common_user }} +Group={{ hadoop_common_group }} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hadoop_common/templates/etc/systemd/system/mapreduce-historyserver.service.j2 b/playbooks/roles/hadoop_common/templates/etc/systemd/system/mapreduce-historyserver.service.j2 new file mode 100644 index 00000000000..2977a45d32e --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/etc/systemd/system/mapreduce-historyserver.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Hadoop MapReduce History Server + +[Service] +Type=forking +ExecStart={{ HADOOP_COMMON_HOME }}/sbin/mr-jobhistory-daemon.sh start historyserver +ExecStop={{ HADOOP_COMMON_HOME }}/sbin/mr-jobhistory-daemon.sh stop historyserver +User={{ hadoop_common_user }} +Group={{ hadoop_common_group }} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-nodemanager.service.j2 b/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-nodemanager.service.j2 new file mode 100644 index 00000000000..43f8b3f562a --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-nodemanager.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Hadoop YARN Node Manager + +[Service] +Type=forking +ExecStart={{ HADOOP_COMMON_HOME }}/sbin/yarn-daemon.sh start nodemanager +ExecStop={{ HADOOP_COMMON_HOME }}/sbin/yarn-daemon.sh stop nodemanager +User={{ hadoop_common_user }} +Group={{ hadoop_common_group }} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-proxyserver.service.j2 b/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-proxyserver.service.j2 new file mode 100644 index 00000000000..632806781cd --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-proxyserver.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Hadoop YARN Proxy Server + +[Service] +Type=forking +ExecStart={{ HADOOP_COMMON_HOME }}/sbin/yarn-daemon.sh start proxyserver +ExecStop={{ HADOOP_COMMON_HOME }}/sbin/yarn-daemon.sh stop proxyserver +User={{ hadoop_common_user }} +Group={{ hadoop_common_group }} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-resourcemanager.service.j2 b/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-resourcemanager.service.j2 new 
file mode 100644 index 00000000000..dadcfa3f359 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/etc/systemd/system/yarn-resourcemanager.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Hadoop YARN Resource Manager + +[Service] +Type=forking +ExecStart={{ HADOOP_COMMON_HOME }}/sbin/yarn-daemon.sh start resourcemanager +ExecStop={{ HADOOP_COMMON_HOME }}/sbin/yarn-daemon.sh stop resourcemanager +User={{ hadoop_common_user }} +Group={{ hadoop_common_group }} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hadoop_common/templates/hadoop-env.sh.j2 b/playbooks/roles/hadoop_common/templates/hadoop-env.sh.j2 new file mode 100644 index 00000000000..205c3f13d9e --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/hadoop-env.sh.j2 @@ -0,0 +1,45 @@ +export JAVA_HOME={{ hadoop_common_java_home }} + +export COMMON_BIN_DIR="{{ COMMON_BIN_DIR }}" +export HADOOP_COMMON_HOME={{ HADOOP_COMMON_HOME }} +export HADOOP_MAPRED_HOME={{ HADOOP_COMMON_HOME }} +export HADOOP_HDFS_HOME={{ HADOOP_COMMON_HOME }} +export YARN_HOME={{ HADOOP_COMMON_HOME }} +export HADOOP_CONF_DIR={{ HADOOP_COMMON_CONF_DIR }} +export HADOOP_COMMON_TOOL_HEAP_MAX="{{ HADOOP_COMMON_TOOL_HEAP_MAX }}" +export HADOOP_COMMON_SERVICE_HEAP_MAX="{{ HADOOP_COMMON_SERVICE_HEAP_MAX }}" +export YARN_HEAPSIZE="$HADOOP_COMMON_SERVICE_HEAP_MAX" + +{% raw %} +export PATH=$PATH:$HADOOP_COMMON_HOME/bin:$HADOOP_COMMON_HOME/sbin:$COMMON_BIN_DIR + +# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. +for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do + if [ "$HADOOP_CLASSPATH" ]; then + export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f + else + export HADOOP_CLASSPATH=$f + fi +done + +# Extra Java runtime options. Empty by default. +export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true" + +# Command specific options appended to HADOOP_OPTS when specified +export HADOOP_NAMENODE_OPTS="-Xmx${HADOOP_COMMON_SERVICE_HEAP_MAX}m -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" +export HADOOP_DATANODE_OPTS="-Xmx${HADOOP_COMMON_SERVICE_HEAP_MAX}m -Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" + +export HADOOP_SECONDARYNAMENODE_OPTS="-Xmx${HADOOP_COMMON_SERVICE_HEAP_MAX}m -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" + +# The following applies to multiple commands (fs, dfs, fsck, distcp etc) +export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_COMMON_TOOL_HEAP_MAX}m $HADOOP_CLIENT_OPTS" + +# The directory where pid files are stored. /tmp by default. +# NOTE: this should be set to a directory that can only be written to by +# the user that will run the hadoop daemons. Otherwise there is the +# potential for a symlink attack. +export HADOOP_PID_DIR=${HADOOP_PID_DIR} + +# A string representing this instance of hadoop. $USER by default. 
+export HADOOP_IDENT_STRING=$USER +{% endraw %} \ No newline at end of file diff --git a/playbooks/roles/hadoop_common/templates/hadoop_user_ssh_config.j2 b/playbooks/roles/hadoop_common/templates/hadoop_user_ssh_config.j2 new file mode 100644 index 00000000000..b40623f4307 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/hadoop_user_ssh_config.j2 @@ -0,0 +1,5 @@ +Host localhost + StrictHostKeyChecking no + +Host 0.0.0.0 + StrictHostKeyChecking no \ No newline at end of file diff --git a/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 b/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 new file mode 100644 index 00000000000..895f7a57979 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 @@ -0,0 +1,11 @@ +{% do HDFS_SITE_DEFAULT_CONFIG.update(HDFS_SITE_EXTRA_CONFIG) %} + + + +{% for key, value in HDFS_SITE_DEFAULT_CONFIG.items() %} + + {{ key }} + {{ value }} + +{% endfor %} + diff --git a/playbooks/roles/hadoop_common/templates/hdfs.conf.j2 b/playbooks/roles/hadoop_common/templates/hdfs.conf.j2 new file mode 100644 index 00000000000..14677f1d3f1 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/hdfs.conf.j2 @@ -0,0 +1,16 @@ +description "hdfs" + +start on starting yarn +stop on stopping yarn + +setuid {{ hadoop_common_user }} + +pre-start script + . {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh + start-dfs.sh +end script + +post-stop script + . {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh + stop-dfs.sh +end script diff --git a/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 b/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 new file mode 100644 index 00000000000..e791db6f4cf --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 @@ -0,0 +1,11 @@ +{% do MAPRED_SITE_DEFAULT_CONFIG.update(mapred_site_config) %} + + + +{% for key, value in MAPRED_SITE_DEFAULT_CONFIG.items() %} + + {{ key }} + {{ value }} + +{% endfor %} + diff --git a/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 b/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 new file mode 100644 index 00000000000..ab4007619de --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 @@ -0,0 +1,10 @@ +{% do YARN_SITE_DEFAULT_CONFIG.update(yarn_site_config) %} + + +{% for key, value in YARN_SITE_DEFAULT_CONFIG.items() %} + + {{ key }} + {{ value }} + +{% endfor %} + diff --git a/playbooks/roles/hadoop_common/templates/yarn.conf.j2 b/playbooks/roles/hadoop_common/templates/yarn.conf.j2 new file mode 100644 index 00000000000..2e02447d0d5 --- /dev/null +++ b/playbooks/roles/hadoop_common/templates/yarn.conf.j2 @@ -0,0 +1,16 @@ +description "yarn" + +start on runlevel [2345] +stop on runlevel [!2345] + +setuid {{ hadoop_common_user }} + +pre-start script + . {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh + start-yarn.sh +end script + +post-stop script + . 
{{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh + stop-yarn.sh +end script diff --git a/playbooks/roles/hadoop_master/defaults/main.yml b/playbooks/roles/hadoop_master/defaults/main.yml new file mode 100644 index 00000000000..e639085bff0 --- /dev/null +++ b/playbooks/roles/hadoop_master/defaults/main.yml @@ -0,0 +1,25 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role hadoop_master +# + +# +# vars are namespaced with the module name. +# +hadoop_master_role_name: hadoop_master + +# +# OS packages +# + +hadoop_master_debian_pkgs: [] + +hadoop_master_redhat_pkgs: [] diff --git a/playbooks/roles/hadoop_master/meta/main.yml b/playbooks/roles/hadoop_master/meta/main.yml new file mode 100644 index 00000000000..82ce2f9461e --- /dev/null +++ b/playbooks/roles/hadoop_master/meta/main.yml @@ -0,0 +1,14 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hadoop_master + +dependencies: + - hadoop_common diff --git a/playbooks/roles/hadoop_master/tasks/main.yml b/playbooks/roles/hadoop_master/tasks/main.yml new file mode 100644 index 00000000000..122e862f9c4 --- /dev/null +++ b/playbooks/roles/hadoop_master/tasks/main.yml @@ -0,0 +1,42 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hadoop_master +# +# Overview: +# +# Configures the machine to be a Hadoop master node. This generally means that it will +# run the HDFS name node and the YARN resource manager. +# +# Dependencies: +# +# hadoop_common: this role installs hadoop generically +# + +- name: Data directories created + file: + path: "{{ HADOOP_COMMON_DATA }}/{{ item }}" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: directory + with_items: + - namenode + - datanode + +- name: Check if namenode is formatted + stat: + path: "{{ HADOOP_COMMON_DATA }}/namenode/current/VERSION" + register: namenode_version_file + +- name: Namenode formatted + shell: ". 
{{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh && hdfs namenode -format" + become_user: "{{ hadoop_common_user }}" + when: not namenode_version_file.stat.exists diff --git a/playbooks/roles/haproxy/defaults/main.yml b/playbooks/roles/haproxy/defaults/main.yml index b42adf89015..303bb5f9028 100644 --- a/playbooks/roles/haproxy/defaults/main.yml +++ b/playbooks/roles/haproxy/defaults/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Defaults for role haproxy @@ -49,11 +49,30 @@ haproxy_default_config: | # desired applications haproxy_applications: - | - listen rabbitmq 127.0.0.1:5672 + listen rabbitmq 127.0.0.1:35672 mode tcp balance roundrobin option tcplog option tcpka - server rabbit01 172.23.128.10:5672 check inter 5000 rise 2 fall 3 - server rabbit02 172.23.129.10:5672 backup check inter 5000 rise 2 fall 3 - server rabbit03 172.23.130.10:5672 backup check inter 5000 rise 2 fall 3 + server rabbit01 192.168.33.100:5672 check inter 5000 rise 2 fall 3 + server rabbit02 192.168.33.110:5672 check inter 5000 rise 2 fall 3 + server rabbit03 192.168.33.120:5672 check inter 5000 rise 2 fall 3 + + listen mariadb 127.0.0.1:13306 + mode tcp + balance roundrobin + option tcplog + option tcpka + option mysql-check user haproxy + server galera1 192.168.33.100:3306 check weight 1 + server galera2 192.168.33.110:3306 check weight 1 + server galera3 192.168.33.120:3306 check weight 1 + + listen elasticsearch 127.0.0.1:19200 + mode tcp + balance roundrobin + option tcplog + option tcpka + server galera1 192.168.33.100:9200 check weight 1 + server galera2 192.168.33.110:9200 check weight 1 + server galera3 192.168.33.120:9200 check weight 1 diff --git a/playbooks/roles/haproxy/handlers/main.yml b/playbooks/roles/haproxy/handlers/main.yml index 71c3191ce31..5c2dcbfaff7 100644 --- a/playbooks/roles/haproxy/handlers/main.yml +++ b/playbooks/roles/haproxy/handlers/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # diff --git a/playbooks/roles/haproxy/meta/main.yml b/playbooks/roles/haproxy/meta/main.yml index 0fbe1d40ef9..591035273fe 100644 --- a/playbooks/roles/haproxy/meta/main.yml +++ b/playbooks/roles/haproxy/meta/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: 
https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Role includes for role haproxy @@ -18,3 +18,6 @@ # my_role_var0: "foo" # my_role_var1: "bar" # } + +dependencies: + - common diff --git a/playbooks/roles/haproxy/tasks/main.yml b/playbooks/roles/haproxy/tasks/main.yml index dee2b97bd37..b0c3d649a8f 100644 --- a/playbooks/roles/haproxy/tasks/main.yml +++ b/playbooks/roles/haproxy/tasks/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -22,9 +22,12 @@ notify: restart haproxy - name: Server configuration file - template: > - src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg - owner=root group=root mode=0644 + template: + src: "{{ haproxy_template_dir }}/haproxy.cfg.j2" + dest: /etc/haproxy/haproxy.cfg + owner: root + group: root + mode: 0644 notify: reload haproxy - name: Enabled in default diff --git a/playbooks/roles/haproxy/templates/haproxy.cfg.j2 b/playbooks/roles/haproxy/templates/haproxy.cfg.j2 index c2bd1c6fe2b..3ca147d2b3a 100644 --- a/playbooks/roles/haproxy/templates/haproxy.cfg.j2 +++ b/playbooks/roles/haproxy/templates/haproxy.cfg.j2 @@ -1,8 +1,8 @@ # this config needs haproxy-1.1.28 or haproxy-1.2.1 global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice + log /dev/log local0 info + log /dev/log local1 notice #log loghost local0 info maxconn 4096 #chroot /usr/share/haproxy diff --git a/playbooks/roles/hermes/defaults/main.yml b/playbooks/roles/hermes/defaults/main.yml new file mode 100644 index 00000000000..03859ab3f91 --- /dev/null +++ b/playbooks/roles/hermes/defaults/main.yml @@ -0,0 +1,86 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role hermes +# + +# +# vars are namespaced with the module name. 
+# + +hermes_user: hermes +hermes_role_name: hermes +hermes_service_name: hermes +hermes_source_repo: https://github.com/edx/hermes.git +HERMES_VERSION: master +hermes_app_dir: "{{ COMMON_APP_DIR }}/{{ hermes_service_name }}" +hermes_download_dir: "{{ hermes_app_dir }}/downloads" +hermes_code_dir: "{{ hermes_app_dir }}/{{ hermes_service_name }}" +hermes_venv_dir: "{{ hermes_app_dir }}/venvs/{{ hermes_service_name }}" +hermes_venv_bin: "{{ hermes_venv_dir }}/bin" +hermes_manifest_yaml_file_path: "{{ COMMON_CFG_DIR }}/{{ hermes_service_name }}.yml" +hermes_private_key_file_path: "{{ hermes_app_dir }}/{{ hermes_service_name }}-private-key" +hermes_environment: + PATH: "{{ hermes_venv_dir }}/bin:{{ ansible_env.PATH }}" +# +# OS packages +# + +hermes_debian_pkgs: [] + +hermes_redhat_pkgs: [] + +# The name of the YAML file on disk that the application looks for, without the suffix. +# This is usually the play used to configure the service, but not always. +HERMES_TARGET_SERVICE: "Not configured" + +# HERMES_INTERVAL is how often to check S3 for updates. +HERMES_INTERVAL: 60 + +# HERMES_JITTER is how much jitter to add to HERMES_INTERVAL. +# Each sleep between checks lasts a random interval between HERMES_INTERVAL +# and HERMES_INTERVAL+HERMES_JITTER. +HERMES_JITTER: 40 + +# Enable pre_hermes_checks.sh in the systemd service file +HERMES_ENABLE_PRE_CHECK_SCRIPT: False + +# Where to download the config file from; must start with s3:// or https:// +HERMES_REMOTE_FILE_LOCATION: "Not configured" + +# Where to download the file from, e.g. s3://some-bucket/{{ HERMES_TARGET_SERVICE }}.yml +HERMES_REMOTE_FILE_PATH: "{{ HERMES_REMOTE_FILE_LOCATION }}/{{ COMMON_ENVIRONMENT }}/{{ HERMES_TARGET_SERVICE }}.yml" + +# Where to download the file to +HERMES_LOCAL_FILE_PATH: "{{ hermes_download_dir }}/{{ HERMES_TARGET_SERVICE }}.yml" + +# How to copy the downloaded file to the config path the application expects +HERMES_COPY_COMMAND: /bin/cp {{ hermes_download_dir }}/{{ HERMES_TARGET_SERVICE }}.yml {{ COMMON_CFG_DIR }}/{{ HERMES_TARGET_SERVICE }}.yml + +# How to restart the application +HERMES_RELOAD_COMMAND: /edx/bin/supervisorctl signal HUP all + +# Hermes typically downloads one config file and reloads all the services on +# that box, typically one service for us. If you need to do something more +# complex, such as reloading multiple services or running additional commands, +# you can override HERMES_SERVICE_CONFIG directly and ignore these defaults. +HERMES_SERVICE_CONFIG: + - url: '{{ HERMES_REMOTE_FILE_PATH }}' + filename: '{{ HERMES_LOCAL_FILE_PATH }}' + command: 'sudo {{ HERMES_COPY_COMMAND }} && sudo {{ HERMES_RELOAD_COMMAND }}' + secret_key_files: "{{ HERMES_PRIVATE_KEYS_DICT | map('regex_replace','^(.*)$','/edx/app/hermes/hermes-\\1') | join(',') if HERMES_PRIVATE_KEYS_DICT is defined else None }}"
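+ +# A sketch of a more complex override watching two files (bucket name, paths, +# and commands here are hypothetical): +# +# HERMES_SERVICE_CONFIG: +#   - url: 's3://example-bucket/prod/lms.yml' +#     filename: '/edx/app/hermes/downloads/lms.yml' +#     command: 'sudo /bin/cp /edx/app/hermes/downloads/lms.yml /edx/etc/lms.yml && sudo /edx/bin/supervisorctl restart lms' +#   - url: 's3://example-bucket/prod/studio.yml' +#     filename: '/edx/app/hermes/downloads/studio.yml' +#     command: 'sudo /bin/cp /edx/app/hermes/downloads/studio.yml /edx/etc/studio.yml && sudo /edx/bin/supervisorctl restart studio'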
 + +# These are dropped into sudoers for the user that runs this program; take care +# to ensure they are safe to run. By default we assume the one-service-per-box, +# restart-via-supervisor model. If you did something custom with +# HERMES_SERVICE_CONFIG, you may need to make adjustments here as well to give +# the application user permission to perform the actions in its service config. +HERMES_ALLOWED_SUDO_COMMANDS: + - "{{ HERMES_COPY_COMMAND }}" + - "{{ HERMES_RELOAD_COMMAND }}" + +HERMES_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ hermes_service_name }}" diff --git a/playbooks/roles/hermes/files/pre_hermes_checks.sh b/playbooks/roles/hermes/files/pre_hermes_checks.sh new file mode 100644 index 00000000000..9dfdce8c3cb --- /dev/null +++ b/playbooks/roles/hermes/files/pre_hermes_checks.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# This is only relevant for AWS instances, and shouldn't be added or run otherwise. +# This script exists because we take a snapshot when building AMIs, and best +# practice is to reboot the instance after taking that snapshot; without the +# reboot, the instance's file system integrity cannot be guaranteed. +# Since we monitor hermes, errors that are not actually a problem get logged +# whenever hermes fails to run correctly on build boxes. +# This script is run before hermes is started, preventing it from booting during builds. + +# The default startup timeout in systemd is 60 seconds; sleep 50 means we should return before the timeout +sleep_time=50 + +# This is a hack to sleep and then return 1 if on a build box. +# The sleep slows down the looping caused by systemd trying to start the service again after a failure. +# Just returning 1 causes tons of "Unit entered failed state" messages. This will reduce them to about one a minute. +if aws sts get-caller-identity --output=text --query 'Arn' | grep -q 'gocd'; then + echo "Detected build server, sleeping ${sleep_time} seconds to reduce log noise" + sleep $sleep_time + exit 1 +fi diff --git a/playbooks/roles/hermes/meta/main.yml b/playbooks/roles/hermes/meta/main.yml new file mode 100644 index 00000000000..4506eeb7b74 --- /dev/null +++ b/playbooks/roles/hermes/meta/main.yml @@ -0,0 +1,29 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hermes +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } +dependencies: + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ hermes_service_name }}" + edx_service_with_rendered_config_service_config: "{{ HERMES_SERVICE_CONFIG }}" + edx_service_with_rendered_config_user: "{{ hermes_user }}" + edx_service_with_rendered_config_home: "{{ hermes_app_dir }}" + edx_service_with_rendered_config_packages: + debian: "{{ hermes_debian_pkgs }}" + redhat: "{{ hermes_redhat_pkgs }}" diff --git a/playbooks/roles/hermes/tasks/main.yml b/playbooks/roles/hermes/tasks/main.yml new file mode 100644 index 00000000000..8d5f0222def --- /dev/null +++ b/playbooks/roles/hermes/tasks/main.yml @@ -0,0 +1,210 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hermes +# +# Overview:
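+# +# Download rendered configuration files from a remote location (S3 or HTTPS) +# on an interval and reload the target service when they change; the HERMES_* +# defaults drive the interval, file locations, and reload commands. +#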
+# Dependencies: +# +# +# Example play: +# +# + +# Install python3.8 on Bionic. Focal comes with python3.8 installed. +- name: install python3.8 + apt: + name: "{{ item }}" + update_cache: yes + register: install_pkgs + until: install_pkgs|success + retries: 10 + delay: 5 + with_items: + - python3.8 + - python3-pip + when: ansible_distribution_release == 'bionic' + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=/usr/bin/python3.8 {{ hermes_venv_dir }}" + args: + creates: "{{ hermes_venv_dir }}/bin/pip" + become_user: "{{ hermes_user }}" + tags: + - install + - install:system-requirements + +- name: git checkout hermes repo into {{ hermes_code_dir }} + git: + dest: "{{ hermes_code_dir }}" + repo: "{{ hermes_source_repo }}" + version: "{{ HERMES_VERSION }}" + accept_hostkey: yes + become_user: "{{ hermes_user }}" + register: hermes_checkout + tags: + - install + - install:code + +- name: Set permission on {{ hermes_app_dir }} + file: + path: "{{ hermes_app_dir }}" + state: directory + recurse: no + owner: "{{ hermes_user }}" + group: "{{ common_web_group }}" + mode: "0750" + tags: + - install + - install:code + +- name: run make requirements + command: make requirements + args: + chdir: "{{ hermes_code_dir }}" + become_user: "{{ hermes_user }}" + environment: "{{ hermes_environment }}" + tags: + - install + - install:app-requirements + +- name: write out the service wrapper + template: + src: "templates/hermes.sh.j2" + dest: "{{ hermes_app_dir }}/{{ hermes_service_name }}.sh" + mode: 0700 + owner: "{{ hermes_user }}" + group: "{{ hermes_user }}" + tags: + - install + - install:configuration + +- name: setup the app env file + template: + src: "templates/hermes_env.j2" + dest: "{{ hermes_app_dir }}/{{ hermes_service_name }}_env" + owner: "{{ hermes_user }}" + group: "{{ hermes_user }}" + mode: 0640 + tags: + - install + - install:configuration + +- name: Create download directory + file: + path: "{{ item }}" + state: directory + owner: "{{ hermes_user }}" + group: "{{ common_web_group }}" + mode: "0750" + with_items: + - "{{ hermes_download_dir }}" + tags: + - install + - install:base + +- name: install private key + copy: + content: "{{ item.value }}" + dest: "{{ hermes_app_dir }}/{{ hermes_service_name }}-{{ item.key }}" + force: yes + owner: "{{ hermes_user }}" + mode: "0600" + no_log: True + with_dict: "{{ HERMES_PRIVATE_KEYS_DICT }}" + when: HERMES_PRIVATE_KEYS_DICT is defined + tags: + - install + - install:base + +- name: Add sudoers entry + template: + src: "templates/sudoers.j2" + dest: "/etc/sudoers.d/hermes" + owner: "root" + group: "root" + mode: 0440 + tags: + - install + - install:base + +- name: create symlinks from the venv bin dir + file: + src: "{{ hermes_venv_dir }}/bin/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ hermes_service_name }}" + state: link + with_items: + - python + - pip + tags: + - install + - install:app-requirements + + +- name: Create hermes systemd job + template: + src: "hermes-systemd.service.j2" + dest: "/etc/systemd/system/{{ hermes_service_name }}.service" + owner: root + group: root + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + tags: + - install + - install:base + + # This command and the subsequent check in the when condition are related + # to this bug: https://github.com/ansible/ansible-modules-core/issues/593 +- name: Are we in a Docker container + shell: echo 
$(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + tags: + - install + - install:base + +- name: Enable hermes to start on boot + service: + name: "{{ hermes_service_name }}.service" + enabled: yes + when: (ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal') and docker_container.stdout != 'yes' + tags: + - install + - install:base + +- name: Write the pre_hermes checks script + copy: + src: pre_hermes_checks.sh + dest: "{{ hermes_app_dir }}/pre_hermes_checks.sh" + owner: "{{ hermes_user }}" + group: "{{ hermes_user }}" + mode: "0750" + when: HERMES_ENABLE_PRE_CHECK_SCRIPT + +- name: reload systemd configuration + command: systemctl daemon-reload + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: Start hermes + service: + name: "{{ hermes_service_name }}" + state: started + register: start_hermes + when: not disable_edx_services + tags: + - manage + - manage:start diff --git a/playbooks/roles/hermes/templates/hermes-systemd.service.j2 b/playbooks/roles/hermes/templates/hermes-systemd.service.j2 new file mode 100644 index 00000000000..7816b136469 --- /dev/null +++ b/playbooks/roles/hermes/templates/hermes-systemd.service.j2 @@ -0,0 +1,23 @@ +[Unit] +Description=Hermes, the messenger/bureaucrat of the edX stack; it fetches documents and files them on a server's filesystem on a regular basis +Documentation=https://github.com/edx/hermes/blob/master/README.md +After=network.target supervisor.service + +[Service] +User={{ hermes_user }} + +Type=simple +TimeoutSec=60 + +Restart=always +RestartSec=1 + +WorkingDirectory={{ hermes_code_dir }} +ExecStart={{ hermes_app_dir }}/{{ hermes_service_name }}.sh -i {{ HERMES_INTERVAL }} -j {{ HERMES_JITTER }} -y {{ hermes_manifest_yaml_file_path }} + +{% if HERMES_ENABLE_PRE_CHECK_SCRIPT %} +ExecStartPre={{ hermes_app_dir }}/pre_hermes_checks.sh +{%- endif %} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hermes/templates/hermes.sh.j2 b/playbooks/roles/hermes/templates/hermes.sh.j2 new file mode 100644 index 00000000000..f2171cc8e49 --- /dev/null +++ b/playbooks/roles/hermes/templates/hermes.sh.j2 @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ hermes_app_dir }}/{{ hermes_service_name }}_env +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = hermes_venv_bin + '/newrelic-admin run-program ' + hermes_venv_bin + '/python' %} +{% else %} +{% set executable = hermes_venv_bin + '/python' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_APP_NAME="{{ hermes_service_name }}" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +# We exec so that hermes is the child of systemd and can be managed properly +exec {{ executable }} {{ hermes_code_dir }}/hermes.py $@ diff --git a/playbooks/roles/hermes/templates/hermes_env.j2 b/playbooks/roles/hermes/templates/hermes_env.j2 new file mode 100644 index 00000000000..ff4b674c823 --- /dev/null +++ b/playbooks/roles/hermes/templates/hermes_env.j2 @@ -0,0 +1,6 @@ +# {{ ansible_managed }} +{% for name,value in hermes_environment.items() %} +{%- if value %} +export {{ name }}="{{ value }}" +{%- endif %} +{% endfor %} diff --git 
a/playbooks/roles/hermes/templates/sudoers.j2 b/playbooks/roles/hermes/templates/sudoers.j2 new file mode 100644 index 00000000000..892c5e76d68 --- /dev/null +++ b/playbooks/roles/hermes/templates/sudoers.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% for line in HERMES_ALLOWED_SUDO_COMMANDS %} +{{ hermes_user }} ALL=(root) NOPASSWD: {{ line }} +{% endfor %} diff --git a/playbooks/roles/hive/defaults/main.yml b/playbooks/roles/hive/defaults/main.yml new file mode 100644 index 00000000000..d6d26115c4a --- /dev/null +++ b/playbooks/roles/hive/defaults/main.yml @@ -0,0 +1,75 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role hive +# + +HIVE_VERSION: 2.1.1 +# This should match the value for SQOOP_MYSQL_CONNECTOR_VERSION in the sqoop role. +HIVE_MYSQL_CONNECTOR_VERSION: 5.1.29 +HIVE_HOME: "{{ HADOOP_COMMON_USER_HOME }}/hive" +HIVE_CONF: "{{ HIVE_HOME }}/conf" +HIVE_LIB: "{{ HIVE_HOME }}/lib" + +HIVE_METASTORE_DATABASE_NAME: edx_hive_metastore +HIVE_METASTORE_DATABASE_USER: edx_hive +HIVE_METASTORE_DATABASE_PASSWORD: edx +HIVE_METASTORE_DATABASE_HOST: 127.0.0.1 +HIVE_METASTORE_DATABASE_PORT: 3306 + +HIVE_METASTORE_DATABASE: + user: "{{ HIVE_METASTORE_DATABASE_USER }}" + password: "{{ HIVE_METASTORE_DATABASE_PASSWORD }}" + name: "{{ HIVE_METASTORE_DATABASE_NAME }}" + host: "{{ HIVE_METASTORE_DATABASE_HOST }}" + port: "{{ HIVE_METASTORE_DATABASE_PORT }}" + +# +# Vars are used to fill in the hive-site.xml file +# +HIVE_SITE_DEFAULT_CONFIG: + javax.jdo.option.ConnectionURL: "jdbc:mysql://{{ HIVE_METASTORE_DATABASE.host }}:{{ HIVE_METASTORE_DATABASE.port }}/{{ HIVE_METASTORE_DATABASE.name }}" + javax.jdo.option.ConnectionDriverName: "com.mysql.jdbc.Driver" + javax.jdo.option.ConnectionUserName: "{{ HIVE_METASTORE_DATABASE.user }}" + javax.jdo.option.ConnectionPassword: "{{ HIVE_METASTORE_DATABASE.password }}" + datanucleus.autoCreateSchema: "false" + hive.metastore.schema.verification: "true" + +# +# These variables override entries in the stock configuration written to +# the hive-site.xml file. Use quoted string literals for the values so that +# YAML does not coerce them unexpectedly. +# +HIVE_SITE_EXTRA_CONFIG: {} + +# +# vars are namespaced with the module name. 
+# +hive_role_name: hive +hive_temporary_dir: /var/tmp +hive_dist: + filename: "apache-hive-{{ HIVE_VERSION }}-bin.tar.gz" + url: "https://archive.apache.org/dist/hive/hive-%7B%7B%20HIVE_VERSION%20%7D%7D/apache-hive-%7B%7B%20HIVE_VERSION%20%7D%7D-bin.tar.gz" + sha256sum: c945dfc39f6489a098507fffa8fe78d8bd41de64887439b024c163ba1d958edc + untarred_filename: "apache-hive-{{ HIVE_VERSION }}-bin" +hive_mysql_connector_dist: + filename: "mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}.tar.gz" + url: "http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-%7B%7B%20HIVE_MYSQL_CONNECTOR_VERSION%20%7D%7D.tar.gz" + sha256sum: 04ad83b655066b626daaabb9676a00f6b4bc43f0c234cbafafac1209dcf1be73 + untarred_filename: "mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}" + jarfilename: "mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar" + +# +# OS packages +# + +hive_debian_pkgs: [] + +hive_redhat_pkgs: [] diff --git a/playbooks/roles/hive/meta/main.yml b/playbooks/roles/hive/meta/main.yml new file mode 100644 index 00000000000..3c267c31f85 --- /dev/null +++ b/playbooks/roles/hive/meta/main.yml @@ -0,0 +1,14 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hive + +dependencies: + - hadoop_common diff --git a/playbooks/roles/hive/tasks/main.yml b/playbooks/roles/hive/tasks/main.yml new file mode 100644 index 00000000000..97416fbc25e --- /dev/null +++ b/playbooks/roles/hive/tasks/main.yml @@ -0,0 +1,91 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hive +# +# Overview: +# +# Install and configure Hive. +# +# Dependencies: +# +# hadoop_common: Hive requires Hadoop to be installed to function properly. 
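+# +# As an illustration, a play can point the metastore at an external MySQL +# host by overriding the defaults (values here are hypothetical): +# +# HIVE_METASTORE_DATABASE_HOST: metastore.example.com +# HIVE_METASTORE_DATABASE_PASSWORD: change-me +#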
 + + +- name: check if downloaded and extracted + stat: + path: "{{ HIVE_HOME }}" + register: extracted_dir + +- name: distribution downloaded + get_url: + url: "{{ hive_dist.url }}" + sha256sum: "{{ hive_dist.sha256sum }}" + dest: "{{ hive_temporary_dir }}" + when: not extracted_dir.stat.exists + +- name: distribution extracted + shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ hive_dist.untarred_filename }}" + args: + chdir: "{{ HADOOP_COMMON_USER_HOME }}" + when: not extracted_dir.stat.exists + +- name: versioned directory symlink created + file: + src: "{{ HADOOP_COMMON_USER_HOME }}/{{ hive_dist.untarred_filename }}" + dest: "{{ HIVE_HOME }}" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: link + +- name: hive mysql connector distribution downloaded + get_url: + url: "{{ hive_mysql_connector_dist.url }}" + sha256sum: "{{ hive_mysql_connector_dist.sha256sum }}" + dest: "{{ hive_temporary_dir }}" + when: not extracted_dir.stat.exists + +- name: hive mysql connector distribution extracted + shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_mysql_connector_dist.filename }}" + args: + chdir: "{{ hive_temporary_dir }}" + when: not extracted_dir.stat.exists + +- name: hive lib exists + file: + path: "{{ HIVE_LIB }}" + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + state: directory + +- name: hive mysql connector installed + shell: "cp {{ hive_mysql_connector_dist.jarfilename }} {{ HIVE_LIB }} && chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/{{ hive_mysql_connector_dist.jarfilename }}" + args: + chdir: "{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}" + when: not extracted_dir.stat.exists + +- name: configuration installed + template: + src: "{{ item }}.j2" + dest: "{{ HIVE_CONF }}/{{ item }}" + mode: 0640 + owner: "{{ hadoop_common_user }}" + group: "{{ hadoop_common_group }}" + with_items: + - hive-env.sh + - hive-site.xml + +- name: env vars sourced in hadoop env + lineinfile: + dest: "{{ hadoop_common_env }}" + state: present + regexp: "^. {{ HIVE_CONF }}/hive-env.sh" + line: ". {{ HIVE_CONF }}/hive-env.sh"
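+ +# After this role runs, the shared hadoop env file chains both components' +# env scripts, so a hadoop-user login shell ends up sourcing, roughly +# (exact paths depend on COMMON_APP_DIR): +# +# . /edx/app/hadoop/hadoop/etc/hadoop/hadoop-env.sh +# . /edx/app/hadoop/hive/conf/hive-env.sh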
{{ HIVE_CONF }}/hive-env.sh" diff --git a/playbooks/roles/hive/templates/hive-env.sh.j2 b/playbooks/roles/hive/templates/hive-env.sh.j2 new file mode 100644 index 00000000000..69b5b3a1b91 --- /dev/null +++ b/playbooks/roles/hive/templates/hive-env.sh.j2 @@ -0,0 +1,4 @@ +#!/bin/bash + +export HIVE_HOME={{ HIVE_HOME }} +export PATH=$PATH:$HIVE_HOME/bin diff --git a/playbooks/roles/hive/templates/hive-site.xml.j2 b/playbooks/roles/hive/templates/hive-site.xml.j2 new file mode 100644 index 00000000000..26d5b9ed9a9 --- /dev/null +++ b/playbooks/roles/hive/templates/hive-site.xml.j2 @@ -0,0 +1,11 @@ +{% do HIVE_SITE_DEFAULT_CONFIG.update(HIVE_SITE_EXTRA_CONFIG) %} +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<configuration> +{% for key, value in HIVE_SITE_DEFAULT_CONFIG.items() %} + <property> + <name>{{ key }}</name> + <value>{{ value }}</value> + </property> +{% endfor %} +</configuration> diff --git a/playbooks/roles/hotg/defaults/main.yml b/playbooks/roles/hotg/defaults/main.yml new file mode 100644 index 00000000000..d821e7a41d7 --- /dev/null +++ b/playbooks/roles/hotg/defaults/main.yml @@ -0,0 +1,116 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Defaults for role hotg +# +hotg_role_name: "hotg" +HOTG_SERVICE_NAME: "hotg" +HOTG_USER: "{{ HOTG_SERVICE_NAME }}" + +# AWS Account details +HOTG_ACCOUNT_ID: !!null +HOTG_ACCOUNT_NAME: "my aws account" +HOTG_AWS_ACCESS_ID: !!null +HOTG_AWS_SECRET_KEY: !!null + +HOTG_GITHUB_OAUTH_KEY: "replace with your key" +HOTG_GITHUB_OAUTH_SECRET: "replace with your secret" +hotg_github_oauth_scope: "user:email,read:org" + +HOTG_GITHUB_EMAIL_REGEX: "/@example.com$/" +HOTG_GITHUB_TEAM_REGEX: "/^your-github-group$/" + +HOTG_GITHUB_ORG: "edx" +HOTG_GITHUB_ORG_ID: "111111" + +HOTG_APITOKEN_ENABLED: "true" +# A list, to allow graceful key retirement; the first item is used for +# new requests. +HOTG_APITOKEN_ENCRYPTION_KEYS: + - "CHANGEME" + +# Java tuning +HOTG_JAVA_MIN_HEAP: "2g" +HOTG_JAVA_MAX_HEAP: "2g" +HOTG_JAVA_MAX_PERM: "128m" + +# The build of our Asgard fork to deploy +HOTG_GIT_REVISION: "b813d0612c9da8b2a38c6d12c8d9020554528436" + +## Authentication configuration +HOTG_PORT: 8090 +HOTG_URL: "http://localhost:{{ HOTG_PORT }}" +HOTG_CALLBACK_URI: "{{ HOTG_URL }}/auth/signIn" +HOTG_SUCCESS_URI: "{{ HOTG_URL }}" +HOTG_AUTHENTICATION_PROVIDER: "githubOauthAuthenticationProvider" + +# Instance types configuration, e.g. +# +# HOTG_ADDITIONAL_INSTANCE_TYPES: +# t3.xlarge: # Required, AWS instance type +# price: 0.052 # Required, must be a number +# # Remaining fields are optional strings, e.g. +# family: 'Burstable' +# group: 't3' +# vCpu: '4' +# mem: '16.00' +HOTG_ADDITIONAL_INSTANCE_TYPES: {} + +# Email configuration +HOTG_EMAIL_FROM_ADDRESS: "asgard@example.com" +HOTG_EMAIL_SYSTEM_FROM_ADDRESS: "asgard@example.com" +HOTG_SMTP_HOST: "localhost" +HOTG_SMTP_PORT: 25 + +# General configuration +HOTG_AWS_REGIONS: + - "us-east-1" + - "us-west-1" + - "us-west-2" + - "eu-west-1" + - "sa-east-1" + - "ap-northeast-1" + - "ap-southeast-1" + - "ap-southeast-2" + +HOTG_MAX_GROUPS: 6 + +# +# vars are namespaced with the module name. 
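+# Per the code conventions linked above, upper-case HOTG_* variables are the +# role's public, deployment-overridable interface, while lower-case hotg_* +# variables below are internal to the role.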
+# + +HOTG_TOMCAT_VERSION: "7.0.54" +hotg_tomcat_package: "https://s3.amazonaws.com/edx-static/tomcat/apache-tomcat-%7B%7B%20HOTG_TOMCAT_VERSION%20%7D%7D.tar.gz" +HOTG_TOMCAT_SHUTDOWN_PORT: 8005 +HOTG_TOMCAT_CONNECTOR_PORT: 8090 +HOTG_TOMCAT_REDIRECT_PORT: 8443 +HOTG_TOMCAT_AJP_PORT: 8009 +HOTG_TOMCAT_HOME: "{{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}" + +# For managing ssl termination via ELB or NGINX. +HOTG_SSL_TERMINATED_ELSEWHERE: false +HOTG_TOMCAT_SSL_PORT: 443 +HOTG_TOMCAT_PROXY_NAME: "hotg.example.com" +HOTG_NGINX_PORT: 80 + +hotg_app_dir: "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" +hotg_home: "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" +hotg_data_dir: "{{ hotg_home }}/data" +hotg_log_dir: "{{ COMMON_LOG_DIR }}/{{ HOTG_SERVICE_NAME }}" + +# +# OS packages +# + +hotg_debian_pkgs: [] + +hotg_pip_pkgs: [] + +hotg_redhat_pkgs: [] diff --git a/playbooks/roles/hotg/meta/main.yml b/playbooks/roles/hotg/meta/main.yml new file mode 100644 index 00000000000..93185a73030 --- /dev/null +++ b/playbooks/roles/hotg/meta/main.yml @@ -0,0 +1,24 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hotg +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - role: oraclejdk + - supervisor diff --git a/playbooks/roles/hotg/tasks/deploy.yml b/playbooks/roles/hotg/tasks/deploy.yml new file mode 100644 index 00000000000..dcb4f1ac05b --- /dev/null +++ b/playbooks/roles/hotg/tasks/deploy.yml @@ -0,0 +1,101 @@ +--- + +- name: create supervisor wrapper + template: > + src=edx/app/hotg/supervisor_wrapper.sh.j2 + dest={{ hotg_app_dir }}/supervisor_wrapper.sh + owner={{ HOTG_USER }} group={{ common_web_user }} mode=0750 + tags: + - install + - install:base + +- name: create supervisor script + template: > + src=edx/app/supervisor/conf.d/hotg.conf.j2 + dest={{ supervisor_available_dir }}/{{ HOTG_SERVICE_NAME }}.conf + owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 + tags: + - install + - install:base + +- name: enable supervisor scripts + file: > + src={{ supervisor_available_dir }}/{{ HOTG_SERVICE_NAME }}.conf + dest={{ supervisor_cfg_dir }}/{{ HOTG_SERVICE_NAME }}.conf + owner={{ supervisor_user }} group={{ common_web_user }} + mode=0644 state=link force=yes + when: not disable_edx_services + tags: + - manage + - manage:update + +- name: update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:update + +- name: stop the service + supervisorctl: > + state=stopped + supervisorctl_path={{ supervisor_ctl }} + config={{ supervisor_cfg }} + name="{{ HOTG_SERVICE_NAME }}" + become: true + become_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:stop + +- name: create hotg application config + template: > + src=edx/app/hotg/Config.groovy.j2 + dest={{ hotg_app_dir }}/Config.groovy + mode=0644 + become: true + become_user: "{{ HOTG_USER }}" + tags: + - install + - install:configuration + + # + # Workaround a bug in ansible where it fails if it cannot verify + # a file's md5sum, which it cannot do for large files +- name: remove old war + file: > + path={{ hotg_app_dir }}/apache-tomcat-{{ 
HOTG_TOMCAT_VERSION }}/webapps/ROOT.war + state=absent + tags: + - install + - install:base + + +- name: grab the war file from s3 + get_url: + url: "https://files.edx.org/devops/jenkins_assets/asgard.war.%7B%7B%20HOTG_GIT_REVISION%20%7D%7D/asgard.war" + dest: "{{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/ROOT.war" + force: yes + tags: + - install + - install:base + +- name: remove exploded war directory + file: > + path={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/ROOT + state=absent + tags: + - install + - install:base + +- name: start the service + supervisorctl: > + state=started + supervisorctl_path={{ supervisor_ctl }} + config={{ supervisor_cfg }} + name="{{ HOTG_SERVICE_NAME }}" + become: true + become_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:start diff --git a/playbooks/roles/hotg/tasks/main.yml b/playbooks/roles/hotg/tasks/main.yml new file mode 100644 index 00000000000..c048aaa0be0 --- /dev/null +++ b/playbooks/roles/hotg/tasks/main.yml @@ -0,0 +1,141 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hotg +# +# Overview: +# +# Deploys an edX forked version of Netflix's Asgard +# under the name Hammer of the gods. +# +# Dependencies: +# +# Example play: +# +# - name: Configure instance(s) +# hosts: hotg +# sudo: True +# vars_files: +# - "{{ secure_dir }}/vars/common/common.yml" +# - "{{ secure_dir }}/vars/users.yml" +# - "{{ secure_dir }}/vars/env/hotg.yml" +# gather_facts: True +# roles: +# - common +# - gh_users +# - oraclejdk +# - splunkforwarder +# - hotg +# + +- name: create application user + user: > + name="{{ HOTG_SERVICE_NAME }}" + home="{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" + createhome=no + shell=/bin/false + tags: + - install + - install:base + +- name: create common directories + file: > + path="{{ item }}" + state=directory + owner="{{ HOTG_SERVICE_NAME }}" + group="{{ common_web_group }}" + with_items: + - "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" + - "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}/data" + tags: + - install + - install:base + +- name: create directories owned by www-data + file: > + path="{{ item }}" + state=directory + owner="{{ common_web_group }}" + group="{{ common_web_group }}" + with_items: + - "{{ COMMON_LOG_DIR }}/{{ HOTG_SERVICE_NAME }}" + tags: + - install + - install:base + +- name: install a bunch of system packages on which hotg relies + apt: pkg={{ item }} state=present + with_items: "{{ hotg_debian_pkgs }}" + when: ansible_distribution in common_debian_variants + tags: + - install + - install:base + +- name: install a bunch of system packages on which hotg relies + yum: pkg={{ item }} state=present + with_items: "{{ hotg_redhat_pkgs }}" + when: ansible_distribution in common_redhat_variants + tags: + - install + - install:base + +# +# Install tomcat +# +- name: download the tomcat archive + get_url: > + dest="/tmp/{{ hotg_tomcat_package|basename }}" + url="{{ hotg_tomcat_package }}" + register: download_tomcat + tags: + - install + - install:base + +- name: explode the archive + shell: > + tar xf /tmp/{{ hotg_tomcat_package|basename }} + creates={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }} + chdir={{ hotg_app_dir }} + tags: + - install + - install:base + +- 
name: chown of the tomcat dir + command: chown -R {{ common_web_user }} "{{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}" + tags: + - install + - install:base + +- name: create hotg tomcat server.xml config + template: > + src=edx/app/hotg/server.xml.j2 + dest={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/conf/server.xml + mode=0644 + owner="root" + group="{{ HOTG_USER }}" + tags: + - install + - install:configuration + +- name: remove unneeded webapps + file: > + path={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/{{ item }} + state=absent + with_items: + - docs + - examples + - host-manager + - manager + tags: + - install + - install:base + +- include: deploy.yml tags=deploy diff --git a/playbooks/roles/hotg/templates/edx/app/hotg/Config.groovy.j2 b/playbooks/roles/hotg/templates/edx/app/hotg/Config.groovy.j2 new file mode 100644 index 00000000000..118608c2c78 --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/hotg/Config.groovy.j2 @@ -0,0 +1,403 @@ +import com.netflix.asgard.model.HardwareProfile +import com.netflix.asgard.model.InstanceTypeData + +grails { + awsAccounts=['{{ HOTG_ACCOUNT_ID }}'] + awsAccountNames=['{{ HOTG_ACCOUNT_ID }}':'{{ HOTG_ACCOUNT_NAME }}'] + serverUrl='{{ HOTG_URL }}' +} + +secret { + accessId='{{ HOTG_AWS_ACCESS_ID }}' + secretKey='{{ HOTG_AWS_SECRET_KEY }}' +} + +cloud { + accountName='{{ HOTG_ACCOUNT_NAME }}' + publicResourceAccounts=['amazon'] + customInstanceTypes = [ + new InstanceTypeData(linuxOnDemandPrice: 0.085, hardwareProfile: + new HardwareProfile(instanceType: 'c5.large', + family: 'Compute Optimized', group: 'c5', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '8', + mem: '4.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.170, hardwareProfile: + new HardwareProfile(instanceType: 'c5.xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '16', + mem: '8.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.340, hardwareProfile: + new HardwareProfile(instanceType: 'c5.2xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '31', + mem: '16.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.680, hardwareProfile: + new HardwareProfile(instanceType: 'c5.4xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '62', + mem: '32.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 1.530, hardwareProfile: + new HardwareProfile(instanceType: 'c5.9xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Nine Extra Large', arch: '64-bit', vCpu: '36', ecu: '132', + mem: '72.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 3.060, hardwareProfile: + new HardwareProfile(instanceType: 'c5.18xlarge', + family: 'Compute Optimized', group: 'c5', + size: '18 Extra Large', arch: '64-bit', vCpu: '72', ecu: '264', + mem: '144.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '25 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.126, hardwareProfile: + new HardwareProfile(instanceType: 'r5.large', + family: 'Memory Optimized', group: 'r5', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '8', + mem: '16.0', storage: 'EBS 
only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.252, hardwareProfile: + new HardwareProfile(instanceType: 'r5.xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '16', + mem: '32.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.504, hardwareProfile: + new HardwareProfile(instanceType: 'r5.2xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '31', + mem: '64.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 1.008, hardwareProfile: + new HardwareProfile(instanceType: 'r5.4xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '62', + mem: '128.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 3.024, hardwareProfile: + new HardwareProfile(instanceType: 'r5.12xlarge', + family: 'Memory Optimized', group: 'r5', + size: '12 Extra Large', arch: '64-bit', vCpu: '48', ecu: '132', + mem: '384.0', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 6.048, hardwareProfile: + new HardwareProfile(instanceType: 'r5.24xlarge', + family: 'Memory Optimized', group: 'r5', + size: '24 Extra Large', arch: '64-bit', vCpu: '96', ecu: '264', + mem: '768.0', storage: 'EBS only', ebsOptim: '-', + netPerf: '25 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.096, hardwareProfile: + new HardwareProfile(instanceType: 'm5.large', + family: 'General Purpose', group: 'm5', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '6.5', + mem: '8.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Moderate')), + new InstanceTypeData(linuxOnDemandPrice: 0.192, hardwareProfile: + new HardwareProfile(instanceType: 'm5.xlarge', + family: 'General Purpose', group: 'm5', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '13', + mem: '16.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.384, hardwareProfile: + new HardwareProfile(instanceType: 'm5.2xlarge', + family: 'General Purpose', group: 'm5', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '26', + mem: '32.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.768, hardwareProfile: + new HardwareProfile(instanceType: 'm5.4xlarge', + family: 'General Purpose', group: 'm5', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '53.5', + mem: '64.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 2.304, hardwareProfile: + new HardwareProfile(instanceType: 'm5.12xlarge', + family: 'General Purpose', group: 'm5', + size: '12 Extra Large', arch: '64-bit', vCpu: '48', ecu: '124.5', + mem: '192.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 4.608, hardwareProfile: + new HardwareProfile(instanceType: 'm5.24xlarge', + family: 'General Purpose', group: 'm5', + size: '24 Extra Large', arch: '64-bit', vCpu: '96', ecu: '124.5', + mem: '384.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.006, hardwareProfile: + new HardwareProfile(instanceType: 't2.nano', + family: 'Burstable', group: 't2', + size: 
'Nano', arch: '64-bit', vCpu: '1', ecu: '1', + mem: '0.50', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.013, hardwareProfile: + new HardwareProfile(instanceType: 't2.micro', + family: 'Burstable', group: 't2', + size: 'Micro', arch: '64-bit', vCpu: '1', ecu: '1', + mem: '1.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.026, hardwareProfile: + new HardwareProfile(instanceType: 't2.small', + family: 'Burstable', group: 't2', + size: 'Small', arch: '64-bit', vCpu: '1', ecu: '1', + mem: '2.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.052, hardwareProfile: + new HardwareProfile(instanceType: 't2.medium', + family: 'Burstable', group: 't2', + size: 'Medium', arch: '64-bit', vCpu: '2', ecu: '3', + mem: '4.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.052, hardwareProfile: + new HardwareProfile(instanceType: 't2.large', + family: 'Burstable', group: 't2', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '6', + mem: '8.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.120, hardwareProfile: + new HardwareProfile(instanceType: 'm4.large', + family: 'General Purpose', group: 'm4', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '6.5', + mem: '8.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Moderate')), + new InstanceTypeData(linuxOnDemandPrice: 0.239, hardwareProfile: + new HardwareProfile(instanceType: 'm4.xlarge', + family: 'General Purpose', group: 'm4', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '13', + mem: '16.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.479, hardwareProfile: + new HardwareProfile(instanceType: 'm4.2xlarge', + family: 'General Purpose', group: 'm4', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '26', + mem: '32.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.958, hardwareProfile: + new HardwareProfile(instanceType: 'm4.4xlarge', + family: 'General Purpose', group: 'm4', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '53.5', + mem: '64.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 2.394, hardwareProfile: + new HardwareProfile(instanceType: 'm4.10xlarge', + family: 'General Purpose', group: 'm4', + size: 'Deca Extra Large', arch: '64-bit', vCpu: '40', ecu: '124.5', + mem: '160.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.105, hardwareProfile: + new HardwareProfile(instanceType: 'c4.large', + family: 'Compute Optimized', group: 'c4', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '8', + mem: '3.75', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Moderate')), + new InstanceTypeData(linuxOnDemandPrice: 0.209, hardwareProfile: + new HardwareProfile(instanceType: 'c4.xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '16', + mem: '7.5', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.419, hardwareProfile: + new HardwareProfile(instanceType: 'c4.2xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '31', + mem: '15.00', storage: 'EBS 
only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.838, hardwareProfile: + new HardwareProfile(instanceType: 'c4.4xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '62', + mem: '30.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 1.675, hardwareProfile: + new HardwareProfile(instanceType: 'c4.8xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Eight Extra Large', arch: '64-bit', vCpu: '36', ecu: '132', + mem: '60.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.004700, hardwareProfile: + new HardwareProfile(instanceType: 't3a.nano', + family: 'General Purpose', group: 't3a', + size: 't3a.nano', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '0.50', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.075200, hardwareProfile: + new HardwareProfile(instanceType: 't3a.micro', + family: 'General Purpose', group: 't3a', + size: 't3a.micro', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '1.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.018800, hardwareProfile: + new HardwareProfile(instanceType: 't3a.small', + family: 'General Purpose', group: 't3a', + size: 't3a.small', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '2.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.037600, hardwareProfile: + new HardwareProfile(instanceType: 't3a.medium', + family: 'General Purpose', group: 't3a', + size: 't3a.medium', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '4.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.075200, hardwareProfile: + new HardwareProfile(instanceType: 't3a.large', + family: 'General Purpose', group: 't3a', + size: 't3a.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.150400, hardwareProfile: + new HardwareProfile(instanceType: 't3a.xlarge', + family: 'General Purpose', group: 't3a', + size: 't3a.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.300800, hardwareProfile: + new HardwareProfile(instanceType: 't3a.2xlarge', + family: 'General Purpose', group: 't3a', + size: 't3a.2xlarge', arch: '64-bit', vCpu: '8', ecu: 'n/a', + mem: '32.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.300800, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.large', + family: 'General Purpose', group: 't3a', + size: 'm5a.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.0', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.172000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5a.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.344000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.2xlarge', + family: 'General 
Purpose', group: 'm5a', + size: 'm5a.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.0', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.172000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.xlarge', + family: 'General Purpose', group: 'm5a', + size: 'm5a.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.344000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.2xlarge', + family: 'General Purpose', group: 'm5a', + size: 'm5a.2xlarge', arch: '64-bit', vCpu: '8', ecu: 'n/a', + mem: '32.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.688000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.4xlarge', + family: 'General Purpose', group: 'm5a', + size: 'm5a.4xlarge', arch: '64-bit', vCpu: '16', ecu: 'n/a', + mem: '64.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 2.064000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.12xlarge', + family: 'General Purpose', group: 'm5a', + size: 'm5a.12xlarge', arch: '64-bit', vCpu: '48', ecu: 'n/a', + mem: '192.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 4.128000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.24xlarge', + family: 'General Purpose', group: 'm5a', + size: 'm5a.24xlarge', arch: '64-bit', vCpu: '96', ecu: 'n/a', + mem: '384.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.103000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.large', + family: 'General Purpose', group: 'm5ad', + size: 'm5ad.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.206000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.xlarge', + family: 'General Purpose', group: 'm5ad', + size: 'm5ad.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.412000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.2xlarge', + family: 'General Purpose', group: 'm5ad', + size: 'm5ad.2xlarge', arch: '64-bit', vCpu: '8', ecu: 'n/a', + mem: '32.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.824000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.4xlarge', + family: 'General Purpose', group: 'm5ad', + size: 'm5ad.4xlarge', arch: '64-bit', vCpu: '16', ecu: 'n/a', + mem: '64.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 2.472000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.12xlarge', + family: 'General Purpose', group: 'm5ad', + size: 'm5ad.12xlarge', arch: '64-bit', vCpu: '48', ecu: 'n/a', + mem: '192.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 4.944000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.24xlarge', + family: 'General Purpose', group: 'm5ad', + size: 'm5ad.24xlarge', arch: '64-bit', vCpu: '96', ecu: 'n/a', + mem: '384.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + {% for instance_type, profile in HOTG_ADDITIONAL_INSTANCE_TYPES.items() %} + + new InstanceTypeData(linuxOnDemandPrice: {{ profile.price }}, hardwareProfile: + new HardwareProfile( + instanceType: '{{ instance_type }}', + family: '{{ profile.family | default("?") }}', + group: '{{ profile.group | default("?") }}', + size: '{{ instance_type }}', + arch: '{{ profile.arch | default("64-bit") }}', + vCpu: '{{ profile.vCpu | default("?") }}', + ecu: '{{ profile.ecu | default("?") }}', + mem: '{{ profile.mem | default("?") }}', + storage: '{{ profile.storage | 
default("EBS only") }}', + ebsOptim: '{{ profile.ebsOptim | default("?") }}', + netPerf: '{{ profile.netPerf | default("?") }}' + )), + {% endfor %} + ] +} + +cluster { + maxGroups = {{ HOTG_MAX_GROUPS }} +} + +security { + apiToken { + enabled = {{ HOTG_APITOKEN_ENABLED }} + encryptionKeys = [ '{{ HOTG_APITOKEN_ENCRYPTION_KEYS |join("\',\'") }}' ] + } +} + +plugin { + authenticationProvider = '{{ HOTG_AUTHENTICATION_PROVIDER }}' +} + +oauth { + providers { + github { + api = com.netflix.asgard.auth.GitHubApi + key = '{{ HOTG_GITHUB_OAUTH_KEY }}' + secret = '{{ HOTG_GITHUB_OAUTH_SECRET }}' + scope = '{{ hotg_github_oauth_scope }}' + callback = '{{ HOTG_CALLBACK_URI }}' + successUri = '{{ HOTG_SUCCESS_URI }}' + extraArgs { + emailRegex = {{ HOTG_GITHUB_EMAIL_REGEX }} + teamRegex = {{ HOTG_GITHUB_TEAM_REGEX }} + org = '{{ HOTG_GITHUB_ORG }}' + orgId = '{{ HOTG_GITHUB_ORG_ID }}' + } + } + } + } + +email { + userEnabled = true + systemEnabled = true + smtpHost = '{{ HOTG_SMTP_HOST }}' + smtpPort = {{ HOTG_SMTP_PORT }} + smtpUsername = '{{ HOTG_AWS_ACCESS_ID }}' + smtpPassword = '{{ HOTG_AWS_SECRET_KEY }}' + smtpSslEnabled = true + fromAddress = '{{ HOTG_EMAIL_FROM_ADDRESS }}' + systemEmailAddress = '{{ HOTG_EMAIL_FROM_ADDRESS }}' + errorSubjectStart = 'Hammer Error: ' +} + +{% if NEWRELIC_API_KEY is defined and NEWRELIC_ACCOUNT_ID is defined %} +newrelic { + apiKey = '{{ NEWRELIC_API_KEY }}' + accountId = '{{ NEWRELIC_ACCOUNT_ID }}' +} +{% endif %} diff --git a/playbooks/roles/hotg/templates/edx/app/hotg/server.xml.j2 b/playbooks/roles/hotg/templates/edx/app/hotg/server.xml.j2 new file mode 100644 index 00000000000..4940434a5d7 --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/hotg/server.xml.j2 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/playbooks/roles/hotg/templates/edx/app/hotg/supervisor_wrapper.sh.j2 b/playbooks/roles/hotg/templates/edx/app/hotg/supervisor_wrapper.sh.j2 new file mode 100644 index 00000000000..b94b82de49f --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/hotg/supervisor_wrapper.sh.j2 @@ -0,0 +1,29 @@ +#!/bin/bash +# Source: https://confluence.atlassian.com/plugins/viewsource/viewpagesrc.action?pageId=252348917 + +export CATALINA_HOME={{ HOTG_TOMCAT_HOME }} +export TOMCAT_HOME={{ HOTG_TOMCAT_HOME }} +export ASGARD_HOME={{ hotg_app_dir }} +export CATALINA_OUT={{ hotg_log_dir }}/catalina.out + +export CATALINA_OPTS="-Djava.awt.headless=true -Xms{{ HOTG_JAVA_MIN_HEAP }} -Xmx{{ HOTG_JAVA_MAX_HEAP }} -verbose:sizes -XX:MaxPermSize={{ HOTG_JAVA_MAX_PERM }} -XX:+HeapDumpOnOutOfMemoryError -XX:-UseGCOverheadLimit -XX:+ExplicitGCInvokesConcurrent -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -DonlyRegions={%- for region in HOTG_AWS_REGIONS -%}{{region}}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}" +export CATALINA_PID=/tmp/$$ +export LD_LIBRARY_PATH=/usr/local/apr/lib + +function shutdown() +{ + date + echo "Shutting down Tomcat" + $TOMCAT_HOME/bin/catalina.sh stop -force +} + +date +echo "Starting Tomcat" + +. 
$TOMCAT_HOME/bin/catalina.sh start + +# Allow any signal which would kill a process to stop Tomcat +trap shutdown HUP INT QUIT ABRT KILL ALRM TERM TSTP + +echo "Waiting for `cat $CATALINA_PID`" +wait `cat $CATALINA_PID` diff --git a/playbooks/roles/hotg/templates/edx/app/supervisor/conf.d/hotg.conf.j2 b/playbooks/roles/hotg/templates/edx/app/supervisor/conf.d/hotg.conf.j2 new file mode 100644 index 00000000000..ea3a27339c3 --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/supervisor/conf.d/hotg.conf.j2 @@ -0,0 +1,6 @@ +[program:{{ HOTG_SERVICE_NAME }}] +directory={{ hotg_app_dir }} +command={{ hotg_app_dir }}/supervisor_wrapper.sh +stdout_logfile=syslog +stderr_logfile=syslog +user={{ common_web_user }} \ No newline at end of file diff --git a/playbooks/roles/in_production/tasks/main.yml b/playbooks/roles/in_production/tasks/main.yml deleted file mode 100644 index cd19a50a345..00000000000 --- a/playbooks/roles/in_production/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ -# requires: -# - group_vars/all -# - common/tasks/main.yml -# - nginx/tasks/main.yml -# - lms/tasks/main.yml -# - ruby/tasks/main.yml -# - npm/tasks/main.yml ---- -- name: Make sure edxapp is running - service: name=edxapp state=started - tags: - - production - - update - -- name: Disable HTTP Basic Auth on site - file: path=/etc/nginx/sites-enabled/basic-auth state=absent - notify: restart nginx - tags: - - production - - update diff --git a/playbooks/roles/insights/defaults/main.yml b/playbooks/roles/insights/defaults/main.yml new file mode 100644 index 00000000000..c673e7fe4e9 --- /dev/null +++ b/playbooks/roles/insights/defaults/main.yml @@ -0,0 +1,253 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Defaults for role insights +# + +INSIGHTS_LMS_BASE: 'http://127.0.0.1:8000/' +INSIGHTS_CMS_BASE: 'http://127.0.0.1:8010/' +INSIGHTS_BASE_URL: 'http://127.0.0.1:8110/' +INSIGHTS_MEMCACHE: [ '127.0.0.1:11211' ] +INSIGHTS_MKTG_BASE: 'http://example.com/' +INSIGHTS_LOGOUT_URL: '{{ INSIGHTS_MKTG_BASE }}/accounts/logout/' +INSIGHTS_PRIVACY_POLICY_URL: '{{ INSIGHTS_MKTG_BASE }}/privacy-policy' +INSIGHTS_TERMS_OF_SERVICE_URL: '{{ INSIGHTS_MKTG_BASE }}/terms-service' +INSIGHTS_SUPPORT_EMAIL: '' +INSIGHTS_CMS_COURSE_SHORTCUT_BASE_URL: '{{ INSIGHTS_LMS_BASE }}/course' +INSIGHTS_OAUTH2_SECRET: 'secret' +INSIGHTS_OAUTH2_URL_ROOT: '{{ INSIGHTS_LMS_BASE }}/oauth2' +INSIGHTS_OAUTH2_URL_LOGOUT: '{{ INSIGHTS_LMS_BASE }}/logout' +INSIGHTS_OAUTH2_APP_CLIENT_NAME: insights +INSIGHTS_OAUTH2_APP_USERNAME: staff +INSIGHTS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +INSIGHTS_SECRET_KEY: 'YOUR_SECRET_KEY_HERE' +INSIGHTS_OAUTH2_KEY: 'YOUR_OAUTH2_KEY' +INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'insights-sso-key' +INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'insights-sso-secret' +INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'insights-backend-service-key' +INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'insights-backend-service-secret' +# This will not work on single instance sandboxes +INSIGHTS_DOC_BASE: 'http://127.0.0.1/en/latest' +ANALYTICS_API_ENDPOINT: 'http://127.0.0.1:8100/api/v0' +INSIGHTS_DATA_API_AUTH_TOKEN: 'changeme' +INSIGHTS_PLATFORM_NAME: 'edX' +INSIGHTS_APPLICATION_NAME: 'Insights' +INSIGHTS_SEGMENT_IO_KEY: 'YOUR_KEY' +# 
should match the timezone of your map reduce pipeline +INSIGHTS_TIME_ZONE: 'UTC' +INSIGHTS_LANGUAGE_CODE: 'en-us' +# email config +INSIGHTS_EMAIL_HOST: 'smtp.example.com' +INSIGHTS_EMAIL_HOST_PASSWORD: "mail_password" +INSIGHTS_EMAIL_HOST_USER: "mail_user" +INSIGHTS_EMAIL_PORT: 587 +INSIGHTS_ENABLE_AUTO_AUTH: false +INSIGHTS_SEGMENT_IGNORE_EMAIL_REGEX: !!null +INSIGHTS_THEME_SCSS: 'sass/themes/open-edx.scss' +INSIGHTS_RESEARCH_URL: 'https://www.edx.org/research-pedagogy' +INSIGHTS_OPEN_SOURCE_URL: 'http://set-me-please/' + +INSIGHTS_DOMAIN: 'insights' + +# Comma-delimited list of field names to include in the Learner List CSV download +# e.g., "username,segments,cohort,engagements.videos_viewed,last_updated" +# Default (null) includes all available fields, in alphabetical order +INSIGHTS_LEARNER_API_LIST_DOWNLOAD_FIELDS: !!null + +INSIGHTS_DATABASE_NAME: 'dashboard' +INSIGHTS_DATABASE_USER: rosencrantz +INSIGHTS_DATABASE_PASSWORD: secret +INSIGHTS_DATABASE_HOST: 127.0.0.1 +INSIGHTS_DATABASE_PORT: 3306 +INSIGHTS_MYSQL_OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + +INSIGHTS_DATABASES: + # rw user + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ INSIGHTS_DATABASE_NAME }}' + USER: '{{ INSIGHTS_DATABASE_USER }}' + PASSWORD: '{{ INSIGHTS_DATABASE_PASSWORD }}' + HOST: "{{ INSIGHTS_DATABASE_HOST }}" + PORT: '{{ INSIGHTS_DATABASE_PORT }}' + OPTIONS: "{{ INSIGHTS_MYSQL_OPTIONS }}" + +INSIGHTS_LMS_COURSE_SHORTCUT_BASE_URL: "URL_FOR_LMS_COURSE_LIST_PAGE" + +INSIGHTS_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +INSIGHTS_CDN_DOMAIN: !!null + +INSIGHTS_CORS_ORIGIN_WHITELIST_EXTRA: [] +INSIGHTS_CORS_ORIGIN_WHITELIST_DEFAULT: + - "{{ INSIGHTS_DOMAIN }}" +INSIGHTS_CORS_ORIGIN_WHITELIST: "{{ INSIGHTS_CORS_ORIGIN_WHITELIST_DEFAULT + INSIGHTS_CORS_ORIGIN_WHITELIST_EXTRA }}" + +# Remote config +INSIGHTS_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +INSIGHTS_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +INSIGHTS_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +INSIGHTS_ENABLE_ADMIN_URLS_RESTRICTION: false + +# +# This block of config is dropped into /edx/etc/insights.yml +# and is read in by analytics_dashboard/settings/production.py +INSIGHTS_CONFIG: + SUPPORT_EMAIL: '{{ INSIGHTS_SUPPORT_EMAIL }}' + DOCUMENTATION_LOAD_ERROR_URL: '{{ INSIGHTS_DOC_BASE }}/Reference.html#error-conditions' + SEGMENT_IO_KEY: '{{ INSIGHTS_SEGMENT_IO_KEY }}' + SEGMENT_IGNORE_EMAIL_REGEX: '{{ INSIGHTS_SEGMENT_IGNORE_EMAIL_REGEX }}' + PRIVACY_POLICY_URL: '{{ INSIGHTS_PRIVACY_POLICY_URL }}' + TERMS_OF_SERVICE_URL: '{{ INSIGHTS_TERMS_OF_SERVICE_URL }}' + HELP_URL: '{{ INSIGHTS_DOC_BASE }}' + SECRET_KEY: '{{ INSIGHTS_SECRET_KEY }}' + DATA_API_URL: '{{ ANALYTICS_API_ENDPOINT }}' + DATA_API_AUTH_TOKEN: '{{ INSIGHTS_DATA_API_AUTH_TOKEN }}' + SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ INSIGHTS_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' + + # Used to automatically configure OAuth2 Client + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + SOCIAL_AUTH_EDX_OAUTH2_ISSUER: '{{ INSIGHTS_LMS_BASE }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ INSIGHTS_LMS_BASE }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ INSIGHTS_OAUTH2_URL_LOGOUT }}' + BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: '{{ INSIGHTS_OAUTH2_URL_ROOT 
}}' + + ENABLE_AUTO_AUTH: '{{ INSIGHTS_ENABLE_AUTO_AUTH }}' + PLATFORM_NAME: '{{ INSIGHTS_PLATFORM_NAME }}' + APPLICATION_NAME: '{{ INSIGHTS_APPLICATION_NAME }}' + CACHES: + default: &default_generic_cache + BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' + KEY_PREFIX: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-insights' + LOCATION: "{{ INSIGHTS_MEMCACHE }}" + TIME_ZONE: '{{ INSIGHTS_TIME_ZONE }}' + LANGUAGE_CODE: '{{ INSIGHTS_LANGUAGE_CODE }}' + # email config + EMAIL_HOST: '{{ INSIGHTS_EMAIL_HOST }}' + EMAIL_HOST_PASSWORD: '{{ INSIGHTS_EMAIL_HOST_PASSWORD }}' + EMAIL_HOST_USER: '{{ INSIGHTS_EMAIL_HOST_USER }}' + EMAIL_PORT: '{{ INSIGHTS_EMAIL_PORT }}' + # static file config + STATICFILES_DIRS: ["{{ insights_static_path }}"] + STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ insights_service_name }}/staticfiles" + RESEARCH_URL: '{{ INSIGHTS_RESEARCH_URL }}' + OPEN_SOURCE_URL: '{{ INSIGHTS_OPEN_SOURCE_URL }}' + # db config + DATABASES: "{{ INSIGHTS_DATABASES }}" + LMS_COURSE_SHORTCUT_BASE_URL: "{{ INSIGHTS_LMS_COURSE_SHORTCUT_BASE_URL }}" + COURSE_API_URL: "{{ INSIGHTS_COURSE_API_URL }}" + GRADING_POLICY_API_URL: "{{ INSIGHTS_GRADING_POLICY_API_URL }}" + MODULE_PREVIEW_URL: "{{ INSIGHTS_MODULE_PREVIEW_URL }}" + # When insights is co-located with other django services, we need to ensure they don't all + # use the same cookie names. + SESSION_COOKIE_NAME: "{{ INSIGHTS_SESSION_COOKIE_NAME | default('insights_sessionid') }}" + CSRF_COOKIE_NAME: "{{ INSIGHTS_CSRF_COOKIE_NAME | default('insights_csrftoken') }}" + LANGUAGE_COOKIE_NAME: "{{ INSIGHTS_LANGUAGE_COOKIE_NAME | default('insights_language') }}" + SESSION_EXPIRE_AT_BROWSER_CLOSE: "{{ INSIGHTS_SESSION_EXPIRE_AT_BROWSER_CLOSE }}" + CMS_COURSE_SHORTCUT_BASE_URL: "{{ INSIGHTS_CMS_COURSE_SHORTCUT_BASE_URL }}" + LEARNER_API_LIST_DOWNLOAD_FIELDS: "{{ INSIGHTS_LEARNER_API_LIST_DOWNLOAD_FIELDS }}" + # CDN url to serve assets from + CDN_DOMAIN: "{{ INSIGHTS_CDN_DOMAIN }}" + CSRF_COOKIE_SECURE: "{{ INSIGHTS_CSRF_COOKIE_SECURE }}" + +INSIGHTS_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-insights" +INSIGHTS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +INSIGHTS_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}" +INSIGHTS_NGINX_PORT: "18110" +INSIGHTS_NGINX_SSL_PORT: "18113" +INSIGHTS_GUNICORN_WORKERS: "2" +INSIGHTS_GUNICORN_EXTRA: "" +INSIGHTS_COURSE_API_URL: "{{ INSIGHTS_LMS_BASE }}/api/courses/v1/" +INSIGHTS_GRADING_POLICY_API_URL: "{{ INSIGHTS_LMS_BASE }}/api/grades/v1/" +INSIGHTS_MODULE_PREVIEW_URL: "{{ INSIGHTS_LMS_BASE }}/xblock" + +INSIGHTS_VERSION: "master" +INSIGHTS_GIT_IDENTITY: !!null + +INSIGHTS_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: edx-analytics-dashboard.git + VERSION: "{{ INSIGHTS_VERSION }}" + DESTINATION: "{{ insights_code_dir }}" + SSH_KEY: "{{ INSIGHTS_GIT_IDENTITY }}" + +INSIGHTS_CSRF_COOKIE_SECURE: false +# +# vars are namespaced with the module name. 
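+# The insights_environment mapping below is rendered by +# templates/edx/app/insights/insights_env.j2 into an insights_env file of +# `export NAME="value"` lines, which the gunicorn wrapper script sources at +# startup.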
+# +insights_environment: + DJANGO_SETTINGS_MODULE: "analytics_dashboard.settings.production" + ANALYTICS_DASHBOARD_CFG: "{{ COMMON_CFG_DIR }}/{{ insights_service_name }}.yml" + PATH: "{{ insights_nodeenv_bin }}:{{ insights_venv_dir }}/bin:{{ ansible_env.PATH }}" + THEME_SCSS: '{{ INSIGHTS_THEME_SCSS }}' + + +insights_service_name: insights +insights_venv_dir: "{{ insights_home }}/venvs/{{ insights_service_name }}" +insights_user: "{{ insights_service_name }}" +insights_app_dir: "{{ COMMON_APP_DIR }}/{{ insights_service_name }}" +insights_home: "{{ COMMON_APP_DIR }}/{{ insights_service_name }}" +insights_code_dir: "{{ insights_app_dir }}/edx_analytics_dashboard" +insights_python_path: "{{ insights_code_dir }}/analytics_dashboard" +insights_static_path: "{{ insights_code_dir }}/analytics_dashboard/static" +insights_conf_dir: "{{ insights_home }}" +insights_log_dir: "{{ COMMON_LOG_DIR }}/{{ insights_service_name }}" + +insights_nodeenv_dir: "{{ insights_home }}/nodeenvs/{{ insights_service_name }}" +insights_nodeenv_bin: "{{ insights_nodeenv_dir }}/bin" +insights_node_modules_dir: "{{ insights_code_dir }}/node_modules" +insights_node_bin: "{{ insights_node_modules_dir }}/.bin" +INSIGHTS_NODE_VERSION: "16.14.0" +INSIGHTS_NPM_VERSION: "8.5.5" + +insights_gunicorn_host: "127.0.0.1" +insights_gunicorn_port: "8110" +insights_gunicorn_timeout: "300" +insights_wsgi: "analytics_dashboard.wsgi:application" + +insights_django_settings: "analytics_dashboard.settings.production" +insights_manage: "{{ insights_code_dir }}/manage.py" + +insights_requirements_base: "{{ insights_code_dir }}/requirements" +insights_requirements: + - production.txt + - optional.txt + +# flag to run Insights on Python 3.8 +INSIGHTS_USE_PYTHON38: true + +# +# OS packages +# +insights_debian_pkgs: + - libmysqlclient-dev + - libssl-dev # needed for mysqlclient python library + - build-essential + - gettext + - pkg-config + - python3-pip + - python3-dev + +insights_release_specific_debian_pkgs: + xenial: + - openjdk-8-jdk + bionic: + - openjdk-8-jdk + focal: + - openjdk-8-jdk diff --git a/playbooks/roles/insights/meta/main.yml b/playbooks/roles/insights/meta/main.yml new file mode 100644 index 00000000000..2d2c474f2f7 --- /dev/null +++ b/playbooks/roles/insights/meta/main.yml @@ -0,0 +1,33 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role insights +# +dependencies: + - common + - role: supervisor + supervisor_spec: + - service: "{{ insights_service_name }}" + migration_check_services: "{{ insights_service_name }}" + python: "{{ insights_venv_dir }}/bin/python" + code: "{{ insights_code_dir | default(None) }}" + env: "{{ insights_home | default(None) }}/insights_env" + - role: edx_service + edx_service_use_python38: "{{ INSIGHTS_USE_PYTHON38 }}" + edx_service_name: "{{ insights_service_name }}" + edx_service_config: "{{ INSIGHTS_CONFIG }}" + edx_service_repos: "{{ INSIGHTS_REPOS }}" + edx_service_user: "{{ insights_user }}" + edx_service_home: "{{ insights_home }}" + edx_service_packages: + debian: "{{ insights_debian_pkgs + insights_release_specific_debian_pkgs[ansible_distribution_release] }}" + redhat: [] + edx_service_decrypt_config_enabled: "{{ INSIGHTS_DECRYPT_CONFIG_ENABLED }}" + edx_service_copy_config_enabled: "{{ 
INSIGHTS_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/insights/tasks/main.yml b/playbooks/roles/insights/tasks/main.yml new file mode 100644 index 00000000000..833becea998 --- /dev/null +++ b/playbooks/roles/insights/tasks/main.yml @@ -0,0 +1,179 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role insights +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: setup the insights env file + template: + src: "edx/app/insights/insights_env.j2" + dest: "{{ insights_app_dir }}/insights_env" + owner: "{{ insights_user }}" + group: "{{ insights_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: install application requirements + pip: + requirements: "{{ insights_requirements_base }}/{{ item }}" + virtualenv: "{{ insights_venv_dir }}" + state: present + extra_args: "--exists-action w" + virtualenv_python: python3.8 + become_user: "{{ insights_user }}" + with_items: "{{ insights_requirements }}" + tags: + - install + - install:app-requirements + +- name: "Install Datadog APM requirements" + when: COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP + pip: + name: + - ddtrace + extra_args: "--exists-action w" + virtualenv: "{{ insights_venv_dir }}" + state: present + become_user: "{{ insights_user }}" + tags: + - install + - install:app-requirements + +- name: create nodeenv + shell: "{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --node={{ INSIGHTS_NODE_VERSION }} --prebuilt --force" + become_user: "{{ insights_user }}" + tags: + - install + - install:system-requirements + +- name: upgrade npm + command: "npm install -g npm@{{ INSIGHTS_NPM_VERSION }}" + become_user: "{{ insights_user }}" + environment: "{{ insights_environment }}" + tags: + - install + - install:system-requirements + +# install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: install node dependencies + shell: "{{ insights_nodeenv_bin }}/npm install" + args: + chdir: "{{ insights_code_dir }}" + become_user: "{{ insights_user }}" + environment: "{{ insights_environment }}" + tags: + - install + - install:app-requirements + +- name: migrate + shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ insights_venv_dir }}/bin/python {{ insights_manage }} migrate --noinput" + args: + chdir: "{{ insights_code_dir }}" + become_user: "{{ insights_user }}" + environment: "{{ insights_environment }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: run webpack + shell: ". 
{{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/webpack --config webpack.prod.config.js" + args: + chdir: "{{ insights_code_dir }}" + become_user: "{{ insights_user }}" + environment: "{{ insights_environment }}" + tags: + - assets + - assets:gather + +- name: run collectstatic + shell: "{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }}" + args: + chdir: "{{ insights_code_dir }}" + become_user: "{{ insights_user }}" + environment: "{{ insights_environment }}" + with_items: + - "collectstatic --noinput" + tags: + - assets + - assets:gather + +- name: write out the supervisor wrapper + template: + src: "edx/app/insights/insights.sh.j2" + dest: "{{ insights_app_dir }}/{{ insights_service_name }}.sh" + mode: 0650 + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + +- name: write supervisord config + template: + src: edx/app/supervisor/conf.d.available/insights.conf.j2 + dest: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: enable supervisor script + file: + src: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf" + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: create manage.py symlink + file: + src: "{{ insights_manage }}" + dest: "{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}" + state: link + tags: + - install + - install:base + +- name: restart insights + supervisorctl: + state: restarted + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + name: "{{ insights_service_name }}" + when: not disable_edx_services + become_user: "{{ supervisor_service_user }}" + tags: + - manage:start diff --git a/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 b/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 new file mode 100644 index 00000000000..55a44b59c4e --- /dev/null +++ b/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set insights_venv_bin = insights_home + '/venvs/' + insights_service_name + '/bin' %} + +{% set executable = insights_venv_bin + '/gunicorn' %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = insights_venv_bin + '/newrelic-admin run-program ' + insights_venv_bin + '/gunicorn' %} + +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ INSIGHTS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" +export NEW_RELIC_APP_NAME="{{ INSIGHTS_NEWRELIC_APPNAME }}" +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = insights_venv_bin + '/ddtrace-run ' + executable %} +export DD_TAGS="service:{{ insights_service_name }}" +export DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true +# Copied from edx_django_service playbook for consistency; Datadog +# trace debug logging issue doesn't actually affect edxapp for some +# reason. 
+export DD_TRACE_LOG_STREAM_HANDLER=false +{% endif -%} + +source {{ insights_app_dir }}/insights_env + +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} --pythonpath={{ insights_python_path }} -b {{ insights_gunicorn_host }}:{{ insights_gunicorn_port }} -w {{ INSIGHTS_GUNICORN_WORKERS }} --timeout={{ insights_gunicorn_timeout }} {{ INSIGHTS_GUNICORN_EXTRA }} {{ insights_wsgi }} diff --git a/playbooks/roles/insights/templates/edx/app/insights/insights_env.j2 b/playbooks/roles/insights/templates/edx/app/insights/insights_env.j2 new file mode 100644 index 00000000000..896a3693149 --- /dev/null +++ b/playbooks/roles/insights/templates/edx/app/insights/insights_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in insights_environment.items() -%} +{%- if value -%} +export {{ name }}="{{ value }}" +{% endif %} +{%- endfor %} diff --git a/playbooks/roles/insights/templates/edx/app/supervisor/conf.d.available/insights.conf.j2 b/playbooks/roles/insights/templates/edx/app/supervisor/conf.d.available/insights.conf.j2 new file mode 100644 index 00000000000..d53377deb08 --- /dev/null +++ b/playbooks/roles/insights/templates/edx/app/supervisor/conf.d.available/insights.conf.j2 @@ -0,0 +1,11 @@ +# {{ ansible_managed }} + +[program:{{ insights_service_name }}] + +command={{ insights_app_dir }}/insights.sh +user={{ common_web_user }} +directory={{ insights_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/insightvm_agent/defaults/main.yml b/playbooks/roles/insightvm_agent/defaults/main.yml new file mode 100644 index 00000000000..0067fc986dd --- /dev/null +++ b/playbooks/roles/insightvm_agent/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +r7_installer_location: /tmp/rapid7_agent_installer.sh +R7_TOKEN: "SET-ME-PLEASE" +R7_BUCKET: "SET-ME-PLEASE (ex. 
bucket-name)" diff --git a/playbooks/roles/insightvm_agent/tasks/main.yml b/playbooks/roles/insightvm_agent/tasks/main.yml new file mode 100644 index 00000000000..faf170086f2 --- /dev/null +++ b/playbooks/roles/insightvm_agent/tasks/main.yml @@ -0,0 +1,52 @@ +--- +# insightvm_agent +# +# Example play: +# +# roles: +# - insightvm_agent + +- name: Check if Rapid7 Agent Currently Installed + stat: + path: /etc/systemd/system/ir_agent.service + register: r7_service + retries: 3 + until: r7_service is succeeded + tags: + - manage_rapid7_check_agent + +- name: Pull Rapid7 Agent Installer from S3 + aws_s3: + bucket: "{{ R7_BUCKET }}" + object: rapid7/rapid7_agent_installer.sh + dest: "{{ r7_installer_location }}" + mode: get + overwrite: different + ignore_nonexistent_bucket: true + register: pull_rapid7_agent_installer_from_s3_result + ignore_errors: true + tags: + - manage_rapid7_pull_installer + when: not r7_service.stat.exists|bool + +- name: Ensure File Permissions are set + file: + path: "{{ r7_installer_location }}" + mode: "0755" + owner: root + group: root + ignore_errors: "{{ ansible_check_mode }}" + register: ensure_file_permissions_are_set_result + tags: + - manage_rapid7_file_perms + when: pull_rapid7_agent_installer_from_s3_result is not failed and not r7_service.stat.exists|bool + +- name: Agent Install + command: "/tmp/rapid7_agent_installer.sh install_start --token {{ R7_TOKEN }}" + no_log: true + tags: + - manage_rapid7_agent_install + ignore_errors: "{{ ansible_check_mode }}" + when: ensure_file_permissions_are_set_result is not failed and pull_rapid7_agent_installer_from_s3_result is not failed and not r7_service.stat.exists|bool + args: + creates: /etc/systemd/system/ir_agent.service diff --git a/playbooks/roles/jenkins_admin/defaults/main.yml b/playbooks/roles/jenkins_admin/defaults/main.yml new file mode 100644 index 00000000000..218d81edcf4 --- /dev/null +++ b/playbooks/roles/jenkins_admin/defaults/main.yml @@ -0,0 +1,101 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role jenkins_admin +# + +# +# vars are namespace with the module name. +# + +JENKINS_ADMIN_NAME: 'default_jenkins_name' + +# A dictionary of AWS credentials to use to make +# a boto file for jenkins. +JENKINS_ADMIN_AWS_CREDENTIALS: !!null + +# jenkins_admin also requires other variables that are not defined by default. + +# JENKINS_ADMIN_S3_PROFILE: !!null +# JENKINS_ADMIN_BACKUP_BUCKET: !!null +# JENKINS_ADMIN_BACKUP_KEY: !!null + +jenkins_admin_role_name: jenkins_admin + + +JENKINS_ADMIN_VERSION: "1.658" +# +# OS packages +# + +jenkins_admin_debian_pkgs: + # These are copied from the edxapp + # role so that we can create virtualenvs + # on the jenkins server for edxapp + # for compiling the virtualenv + # (only needed if wheel files aren't available) + - build-essential + - s3cmd + - pkg-config + - graphviz-dev + - graphviz + - libmysqlclient-dev + # for scipy, do not install + # libopenblas-base, it will cause + # problems for numpy + - gfortran + - liblapack-dev + - g++ + - libxml2-dev + - libxslt1-dev + # apparmor + - apparmor-utils + # misc + - curl + - ipython + - nodejs + - ntp + # for shapely + - libgeos-dev + # i18n + - gettext + # Pillow (PIL Fork) Dependencies + # Needed by the CMS to manipulate images. 
+ - libjpeg8-dev + - libpng12-dev + # for check-migrations + - mysql-client + # for aws cli scripting + - jq + # pyOpenSSL prerequisite + # Needed by the Python script that checks SSL expiration + - libffi-dev + +jenkins_admin_redhat_pkgs: [] + +jenkins_admin_plugins: [] # Plugins installed manually, not tracked here. + +# See templates directory for potential basic jobs you could add to your jenkins. +jenkins_admin_jobs: [] + +# Supervisor related settings +jenkins_supervisor_user: "{{ jenkins_user }}" +jenkins_supervisor_app_dir: "{{ jenkins_home }}/supervisor" +jenkins_supervisor_cfg_dir: "{{ jenkins_supervisor_app_dir }}/conf.d" +jenkins_supervisor_available_dir: "{{ jenkins_supervisor_app_dir }}/available.d" +jenkins_supervisor_data_dir: "{{ jenkins_home }}/supervisor/data" +jenkins_supervisor_cfg: "{{ jenkins_supervisor_app_dir }}/supervisord.conf" +jenkins_supervisor_log_dir: "{{ COMMON_LOG_DIR }}/supervisor/jenkins" +jenkins_supervisor_venv_dir: "{{ jenkins_home }}/venvs/supervisor" +jenkins_supervisor_venv_bin: "{{ jenkins_supervisor_venv_dir }}/bin" +jenkins_supervisor_ctl: "{{ jenkins_supervisor_venv_bin }}/supervisorctl" +jenkins_supervisor_service_user: "{{ jenkins_user }}" + +jenkins_admin_scripts_dir: "{{ jenkins_home }}/scripts" diff --git a/playbooks/roles/jenkins_admin/handlers/main.yml b/playbooks/roles/jenkins_admin/handlers/main.yml new file mode 100644 index 00000000000..a64427a5122 --- /dev/null +++ b/playbooks/roles/jenkins_admin/handlers/main.yml @@ -0,0 +1,21 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Handlers for role jenkins_admin +# +# Overview: +# + +# Have to use shell here because supervisorctl doesn't support +# process groups. 
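+# With the defaults above, the command expands to something like (paths are +# illustrative, assuming jenkins_home is /edx/app/jenkins): +# /edx/app/jenkins/venvs/supervisor/bin/supervisorctl -c /edx/app/jenkins/supervisor/supervisord.conf restart nat_monitor:*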
+- name: restart nat monitor + shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} restart nat_monitor:*" + when: not disable_edx_services diff --git a/playbooks/roles/jenkins_admin/meta/main.yml b/playbooks/roles/jenkins_admin/meta/main.yml new file mode 100644 index 00000000000..142b900ebd6 --- /dev/null +++ b/playbooks/roles/jenkins_admin/meta/main.yml @@ -0,0 +1,39 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role jenkins_admin +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } +dependencies: + - common + - edxapp_common + - role: jenkins_master + jenkins_plugins: "{{ jenkins_admin_plugins }}" + JENKINS_VERSION: "{{ JENKINS_ADMIN_VERSION }}" + jenkins_deb_url: "https://pkg.jenkins.io/debian/binary/jenkins_%7B%7B%20JENKINS_VERSION%20%7D%7D_all.deb" + jenkins_custom_plugins: [] + jenkins_bundled_plugins: [] + - role: supervisor + supervisor_app_dir: "{{ jenkins_supervisor_app_dir }}" + supervisor_data_dir: "{{ jenkins_supervisor_data_dir }}" + supervisor_log_dir: "{{ jenkins_supervisor_log_dir }}" + supervisor_venv_dir: "{{ jenkins_supervisor_venv_dir }}" + supervisor_service_user: "{{ jenkins_supervisor_user }}" + supervisor_available_dir: "{{ jenkins_supervisor_available_dir }}" + supervisor_cfg_dir: "{{ jenkins_supervisor_cfg_dir }}" + supervisor_service: "supervisor.jenkins" + supervisor_http_bind_port: '9003' diff --git a/playbooks/roles/jenkins_admin/tasks/main.yml b/playbooks/roles/jenkins_admin/tasks/main.yml new file mode 100644 index 00000000000..777cd731284 --- /dev/null +++ b/playbooks/roles/jenkins_admin/tasks/main.yml @@ -0,0 +1,174 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role jenkins_admin +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- fail: msg="JENKINS_ADMIN_S3_PROFILE is not defined." + when: JENKINS_ADMIN_S3_PROFILE is not defined + +- fail: msg="JENKINS_ADMIN_S3_PROFILE.name is not defined." + when: JENKINS_ADMIN_S3_PROFILE.name is not defined + +- fail: msg="JENKINS_ADMIN_S3_PROFILE.access_key is not defined." + when: JENKINS_ADMIN_S3_PROFILE.access_key is not defined + +- fail: msg="JENKINS_ADMIN_S3_PROFILE.secret_key is not defined." 
+ when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined + +- name: create the scripts directory + file: + path: "{{ jenkins_admin_scripts_dir }}" + state: "directory" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + +- name: configure s3 plugin + template: + src: "./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2" + dest: "{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0644 + +- name: configure the boto profiles for jenkins + template: + src: "./{{ jenkins_home }}/boto.j2" + dest: "{{ jenkins_home }}/.boto" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0600 + tags: + - aws-config + +- name: create the .aws directory + file: + path: "{{ jenkins_home }}/.aws" + state: "directory" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0700 + tags: + - aws-config + +- name: configure the awscli profiles for jenkins + template: + src: "./{{ jenkins_home }}/aws_config.j2" + dest: "{{ jenkins_home }}/.aws/config" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0600 + tags: + - aws-config + +- name: create the ssh directory + file: + path: "{{ jenkins_home }}/.ssh" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0700 + state: directory + +# Need to add Github to known_hosts to avoid +# being prompted when using git through ssh +- name: Add github.com to known_hosts if it does not exist + shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts" + +- name: create job directory + file: + path: "{{ jenkins_home }}/jobs" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + state: directory + +- name: create admin job directories + file: + path: "{{ jenkins_home }}/jobs/{{ item }}" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + state: directory + with_items: "{{ jenkins_admin_jobs }}" + +- name: create admin job config files + template: + src: "./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2" + dest: "{{ jenkins_home }}/jobs/{{ item }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0644 + with_items: "{{ jenkins_admin_jobs }}" + +- name: install system packages for edxapp virtualenvs + apt: + pkg: "{{ ','.join(jenkins_admin_debian_pkgs) }}" + state: "present" + update_cache: yes + +# This is necessary so that ansible can run with +# sudo set to True (as the jenkins user) on jenkins +- name: grant sudo access to the jenkins user + copy: + content: "{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL" + dest: "/etc/sudoers.d/99-jenkins" + owner: "root" + group: "root" + mode: 0440 + validate: "visudo -cf %s" + +- name: get s3 one time url + aws_s3: + bucket: "{{ JENKINS_ADMIN_BACKUP_BUCKET }}" + object: "{{ JENKINS_ADMIN_BACKUP_S3_KEY }}" + mode: "geturl" + expiration: 30 + register: s3_one_time_url + when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined + +- name: download s3 backup + get_url: + url: "{{ s3_one_time_url.url }}" + dest: "/tmp/jenkins_backup.tar.gz" + mode: 0644 + owner: "{{ jenkins_user }}" + when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined + +- name: stop jenkins + service: + name: "jenkins" + state: "stopped" + when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined + +- name: restore the 
backup + unarchive: + src: "/tmp/jenkins_backup.tar.gz" + dest: "{{ jenkins_home }}/.." + owner: "{{ jenkins_user }}" + copy: false + when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined + +# When testing consider commenting this out +- name: start jenkins + service: + name: "jenkins" + state: "started" + when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 new file mode 100644 index 00000000000..8700534b4dc --- /dev/null +++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 @@ -0,0 +1,6 @@ +{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.items() %} +[profile {{ deployment }}] +aws_access_key_id = {{ creds.access_id }} +aws_secret_access_key = {{ creds.secret_key }} + +{% endfor %} diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 new file mode 100644 index 00000000000..8700534b4dc --- /dev/null +++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 @@ -0,0 +1,6 @@ +{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.items() %} +[profile {{ deployment }}] +aws_access_key_id = {{ creds.access_id }} +aws_secret_access_key = {{ creds.secret_key }} + +{% endfor %} diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/hudson.plugins.s3.S3BucketPublisher.xml.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/hudson.plugins.s3.S3BucketPublisher.xml.j2 new file mode 100644 index 00000000000..2c11599be1c --- /dev/null +++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/hudson.plugins.s3.S3BucketPublisher.xml.j2 @@ -0,0 +1,10 @@ + + + + + {{ JENKINS_ADMIN_S3_PROFILE.name }} + {{ JENKINS_ADMIN_S3_PROFILE.access_key }} + {{ JENKINS_ADMIN_S3_PROFILE.secret_key }} + + + diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/jobs/backup-jenkins/config.xml.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/jobs/backup-jenkins/config.xml.j2 new file mode 100644 index 00000000000..5369df37198 --- /dev/null +++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/jobs/backup-jenkins/config.xml.j2 @@ -0,0 +1,59 @@ + + + + + false + + + true + false + false + false + + + @daily + + + false + + + +#!/bin/bash -x + +# Delete all files in the workspace +rm -rf * +# Create a directory for the job definitions +mkdir -p $BUILD_ID/jobs +# Copy global configuration files into the workspace +cp $JENKINS_HOME/*.xml $BUILD_ID/ +# Copy keys and secrets into the workspace +cp $JENKINS_HOME/secret.key $BUILD_ID/ +cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/ +cp -r $JENKINS_HOME/secrets $BUILD_ID/ +# Copy user configuration files into the workspace +#cp -r $JENKINS_HOME/users $BUILD_ID/ +# Copy job definitions into the workspace +rsync -am --include='config.xml' --include='*/' --prune-empty-dirs --exclude='*' $JENKINS_HOME/jobs/ $BUILD_ID/jobs/ +# Create an archive from all copied files (since the S3 plugin cannot copy folders recursively) +tar czf $BUILD_ID.tar.gz $BUILD_ID/ +# Remove the directory so only the archive gets copied to S3 +rm -rf $BUILD_ID + + + + + + {{ JENKINS_ADMIN_S3_PROFILE.name }} + + + edx-jenkins-backups/{{ JENKINS_ADMIN_NAME }} + ${BUILD_ID}.tar.gz + STANDARD + US_EAST_1 + + + + + + + diff --git a/playbooks/roles/jenkins_analytics/README.rst 
b/playbooks/roles/jenkins_analytics/README.rst new file mode 100644 index 00000000000..bd934debec7 --- /dev/null +++ b/playbooks/roles/jenkins_analytics/README.rst @@ -0,0 +1,645 @@ +Jenkins Analytics +################# + +A role that sets up Jenkins for scheduling analytics tasks. + +This role performs the following steps: + +- Installs Jenkins using ``jenkins_master``. +- Configures ``config.xml`` to enable security and use Github OAuth plugin (by + default) or Unix Auth Domain. +- Creates Jenkins credentials. +- Enables the use of Jenkins CLI. +- Installs a seed job from configured repository, launches it and waits for it + to finish. +- The seed job creates the analytics task jobs. + +Each analytics task job is created using a task-specific DSL script which +determines the structure of the Jenkins job, e.g. its scheduled frequency, the +git repos cloned to run the task, the parameters the job requires, and the +shell script used to run the analytics task. These DSL scripts live in a +separate git repo, configured by ``ANALYTICS_SCHEDULE_JOBS_DSL_REPO_*``. + +Configuration +************* + +When you are using vagrant you **need** to set +``VAGRANT_JENKINS_LOCAL_VARS_FILE`` environment variable. This variable must +point to a file containing all required variables from this section. + +This file needs to contain, at least, the following variables (see the next few +sections for more information about them): + +- ``JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_*`` or + ``JENKINS_ANALYTICS_USER_PASSWORD_PLAIN``. See `Jenkins Security`_ for + details. +- (``JENKINS_ANALYTICS_GITHUB_CREDENTIAL_*`` and + ``ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_*``) and/or + ``JENKINS_ANALYTICS_CREDENTIALS``. See `Jenkins Credentials`_ for details. +- ``ANALYTICS_SCHEDULE_SECURE_REPO_*`` and + ``ANALYTICS_SCHEDULE__EXTRA_VARS``. See `Jenkins Seed Job + Configuration`_ for details. + +End-user editable configuration +=============================== + +Jenkins Security +---------------- + +The ``jenkins_analytics`` role provides two options for controlling +authentication and authorization to the Jenkins application: + +- `Github OAuth plugin`_ (default) +- Unix system user + +Both roles control authorization permissions using the `Matrix Authorization +Strategy`_. See `Authorization`_ for details. + +Github OAuth +............ + +To select this security mechanism, set +``JENKINS_ANALYTICS_AUTH_REALM: github_oauth``. + +The `Github OAuth plugin`_ uses Github usernames and organization memberships +to control access to the Jenkins GUI and CLI tool. + +To configure Github OAuth: + +1. Create a `GitHub application registration`_. + + - Application name: choose an appropriate name, e.g. edX Analytics + Scheduler + - Homepage URL: choose an appropriate URL within your Jenkins install, + usually the home page. + e.g., ``http://localhost:8080`` + - Authorization callback URL: Must be your Jenkins base URL, with path + ``/securityRealm/finishLogin``. + e.g., ``http://localhost:8080/securityRealm/finishLogin`` + +2. Copy the Client ID and Client Secret into these variables: + + :: + + JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_ID: + JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_SECRET: + +3. Optionally add your Github username or groups to the + ``JENKINS_ANALYTICS_AUTH_JOB_BUILDERS`` and/or + ``JENKINS_ANALYTICS_AUTH_ADMINISTRATORS`` lists. See `Authorization`_ below + for details. + +4. | Optionally, but only with good reason, update the list of Github OAuth + Scopes. 
This setting determines the Github permissions that the Jenkins + application will have in Github on behalf of the authenticated user. + | Default value is: + + :: + + JENKINS_ANALYTICS_GITHUB_OAUTH_SCOPES: + - read:org + - user:email + +5. You may also update the Github OAuth Web URI and API URI values, if for + instance, you're using a locally installed enterprise version of Github. + Default values are: + + :: + + JENKINS_ANALYTICS_GITHUB_OAUTH_WEB_URI: 'https://github.com/' + JENKINS_ANALYTICS_GITHUB_OAUTH_API_URI: 'https://api.github.com/' + +Unix system user +................ + +To select this security mechanism, set ``JENKINS_ANALYTICS_AUTH_REALM: unix``. + +This security mechanism uses the ``jenkins`` system user and password for +access to the Jenkins GUI and CLI tool. + +You'll need to override default ``jenkins`` user password, please do that +carefully as this sets up the **shell** password for this user. + +You'll need to set a plain password so ansible can reach Jenkins via the +command line tool. + +:: + + JENKINS_ANALYTICS_AUTH_REALM: unix + JENKINS_ANALYTICS_USER_PASSWORD_PLAIN: "your plain password" + +Authorization +............. + +The ``jenkins_analytics`` role configures authorization using the `Matrix +Authorization Strategy`_. This strategy provides fine-grained control over +which permissions are granted to which users or group members. + +Currently there are three different levels of user access configured: + +- ``anonymous``: The ``anonymous`` user is special in Jenkins, and denotes any + unauthenticated user. By default, no permissions are granted to anonymous + users, which forces all users to the login screen. +- ``JENKINS_ANALYTICS_AUTH_ADMINISTRATORS``: list of members who are granted + all permissions by default. The ``jenkins`` user is automatically added to + this list, so that ansible can maintain the Jenkins instance. + See `Security Note`_ below. +- ``JENKINS_ANALYTICS_AUTH_JOB_BUILDERS``: list of members who are granted + permissions sufficient for maintaining Jobs, Credentials, and Views. + +When ``JENKINS_ANALYTICS_AUTH_REALM: github_oauth``, members of the above lists +may be GitHub users, organizations, or teams. + +- ``username`` - give permissions to a specific GitHub username. +- ``organization`` - give permissions to every user that belongs to a specific + GitHub organization. Members must be *public members* of the organization + for the authorization to work correctly. Also, the organization itself must + allow access by the Github OAuth application, which must be granted by an + administrator of the organization. See `Github third-party application + restrictions`_ for more information. +- ``organization*team`` - give permissions to a specific GitHub team of a + GitHub organization. Notice that organization and team are separated by an + asterisk (``*``). The Github OAuth plugin documentation doesn't say so, but + the team probably needs to be a public team. + +For example, this configuration grants job builder access to all of +``edx-ops``, and admin access only to members of the +``jenkins-config-push-pull`` team within ``edx-ops``. + +:: + + JENKINS_ANALYTICS_AUTH_JOB_BUILDERS: + - edx-ops + JENKINS_ANALYTICS_AUTH_ADMINISTRATORS: + - edx-ops*jenkins-config-push-pull + +The list of permissions granted to each group is also configurable, but +exercise caution when changing. + +- ``JENKINS_ANALYTICS_AUTH_ANONYMOUS_PERMISSIONS``: Defaults to an empty list, + indicating no permissions. 
+- ``JENKINS_ANALYTICS_AUTH_ADMINISTRATOR_PERMISSIONS``: Defaults to the full
+  list of available Jenkins permissions at time of writing.
+- ``JENKINS_ANALYTICS_AUTH_JOB_BUILDER_PERMISSIONS``: By default, job builders
+  are missing Jenkins Admin/Update permissions, as well as access required to
+  administer slave Jenkins instances. However, they are granted these
+  permissions:
+
+  - ``com.cloudbees.plugins.credentials.CredentialsProvider.*``: Allows
+    management of Jenkins Credentials.
+  - ``hudson.model.Hudson.Read``: Grants read access to almost all pages in
+    Jenkins.
+  - ``hudson.model.Hudson.RunScripts``: Grants access to the Jenkins Script
+    Console and CLI groovy interface.
+  - ``hudson.model.Item.*``: Allows management of Jenkins Jobs.
+  - ``hudson.model.Run.*``: Allows management of Jenkins Job Runs.
+  - ``hudson.model.View.*``: Allows management of Jenkins Views.
+  - ``hudson.scm.SCM.Tag``: Allows users to create a new tag in the source
+    code repository for a given build.
+
+The user/group lists and permissions are joined using matching keys in the
+``jenkins_auth_users`` and ``jenkins_auth_permissions`` structures.
+
+If additional groups are required, you must add them to both
+``jenkins_auth_users`` and ``jenkins_auth_permissions``. This example shows the
+current three groups, plus a fourth group whose members can view Job status:
+
+::
+
+    jenkins_auth_users:
+      anonymous:
+        - anonymous
+      administrators: "{{ jenkins_admin_users + JENKINS_ANALYTICS_AUTH_ADMINISTRATORS }}"
+      job_builders: "{{ JENKINS_ANALYTICS_AUTH_JOB_BUILDERS | default([]) }}"
+      job_readers: "{{ JENKINS_ANALYTICS_AUTH_JOB_READERS | default([]) }}"
+
+    jenkins_auth_permissions:
+      anonymous: "{{ JENKINS_ANALYTICS_AUTH_ANONYMOUS_PERMISSIONS }}"
+      administrators: "{{ JENKINS_ANALYTICS_AUTH_ADMINISTRATOR_PERMISSIONS }}"
+      job_builders: "{{ JENKINS_ANALYTICS_AUTH_JOB_BUILDER_PERMISSIONS }}"
+      job_readers:
+        - hudson.model.Hudson.Read
+        - hudson.model.Item.Discover
+        - hudson.model.Item.Read
+        - hudson.model.View.Read
+
+Security Note
+.............
+
+As mentioned above, we append the ``jenkins`` user to the
+``JENKINS_ANALYTICS_AUTH_ADMINISTRATORS`` list, to allow ansible to configure
+Jenkins via the CLI tool. However, when
+``JENKINS_ANALYTICS_AUTH_REALM: github_oauth``, there is a risk that the owner
+of the Github username ``jenkins`` could use that login to gain admin access
+to Jenkins. This would be a risk no matter which username we chose for this
+role.
+
+Jenkins credentials
+-------------------
+
+Jenkins contains its own credential store. To fill it with credentials, we
+recommend overriding these variables:
+
+- ``JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER``: github username, with read
+  access to the secure config and job dsl repos.
+- ``JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE``: optional passphrase, if
+  required for ``JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER``. Default is
+  ``null``.
+- ``JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY``: private key for the
+  ``JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER``, e.g.
+  ``"{{ lookup('file', '/home/you/.ssh/id_rsa') }}"``
+- ``ANALYTICS_SCHEDULE_SECURE_REPO_MASTER_SSH_CREDENTIAL_FILE``: path to the
+  ssh key file, relative to the ``ANALYTICS_SCHEDULE_SECURE_REPO_URL``. This
+  file will be used as the private key to grant ssh access to the EMR
+  instances. See `Jenkins Seed Job Configuration`_ for details.
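+
+For example, to load the deploy key from a file on the machine running
+ansible (a minimal sketch; the key path is only an illustration):
+
+::
+
+    JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER: 'git'
+    JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY: "{{ lookup('file', '/home/you/.ssh/id_rsa') }}"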
+
+Note that because the ``ANALYTICS_SCHEDULE_SECURE_REPO_*`` repo isn't cloned
+until the seed job is built, the ``ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID``
+credential uses ``type: ssh-private-keyfile``, which allows the credential to
+be created before the private key file actually exists on the file system.
+
+Alternatively, you may override the ``JENKINS_ANALYTICS_CREDENTIALS`` variable.
+This variable is a list of objects, each object representing a single
+credential. For now, passwords, ssh keys, and ssh key files are supported. Each
+credential has a unique ID, which is used to match the credential to the
+task(s) for which it is needed.
+
+Default value for ``JENKINS_ANALYTICS_CREDENTIALS``, and the variables it
+depends on:
+
+::
+
+    JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID: 'github-deploy-key'
+    JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER: 'git'
+    JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE: null
+
+    ANALYTICS_SCHEDULE_SECURE_REPO_DEST: "analytics-secure-config"
+    ANALYTICS_SCHEDULE_SECURE_REPO_MASTER_SSH_CREDENTIAL_FILE: "aws.pem"
+    ANALYTICS_SCHEDULE_SEED_JOB_NAME: "AnalyticsSeedJob"
+    ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID: "ssh-access-key"
+    ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_USER: "hadoop"
+    ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE: null
+    ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_FILE: "{{ jenkins_home }}/workspace/{{ ANALYTICS_SCHEDULE_SEED_JOB_NAME }}/{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/{{ ANALYTICS_SCHEDULE_SECURE_REPO_MASTER_SSH_CREDENTIAL_FILE }}"
+
+    JENKINS_ANALYTICS_CREDENTIALS:
+      - id: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}"
+        scope: GLOBAL
+        username: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER }}"
+        type: ssh-private-key
+        passphrase: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE }}"
+        description: github access key, generated by ansible
+        privatekey: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY }}"
+      - id: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID }}"
+        scope: GLOBAL
+        username: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_USER }}"
+        type: ssh-private-keyfile
+        passphrase: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE }}"
+        description: ssh access key, generated by ansible
+        privatekey: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_FILE }}"
+
+If you wish to use an explicit SSH key instead of reading it from a file, you
+could override ``JENKINS_ANALYTICS_CREDENTIALS`` like this:
+
+::
+
+    ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_KEY: |
+      -----BEGIN RSA PRIVATE KEY-----
+      ...
+      -----END RSA PRIVATE KEY-----
+
+    JENKINS_ANALYTICS_CREDENTIALS:
+      - id: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}"
+        scope: GLOBAL
+        username: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER }}"
+        type: ssh-private-key
+        passphrase: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE }}"
+        description: github access key, generated by ansible
+        privatekey: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY }}"
+      - id: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID }}"
+        scope: GLOBAL
+        username: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_USER }}"
+        type: ssh-private-key
+        passphrase: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE }}"
+        description: ssh access key, generated by ansible
+        privatekey: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_KEY }}"
+
+Jenkins seed job configuration
+------------------------------
+
+The seed job creates the Analytics Jobs that will run the analytics tasks. By
+default, the seed job creates all the available Analytics Jobs, but you can
+disable these jobs, and set their parameters, using the
+``ANALYTICS_SCHEDULE_<TASK>_*`` variables.
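+
+For instance, to skip one of the generated jobs while leaving the rest on
+their defaults (a minimal sketch; the flag is one of the toggles listed in
+this role's ``defaults/main.yml``):
+
+::
+
+    ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_ALL_VIDEO: false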
+
+Currently supported analytics tasks are:
+
+- ``ANSWER_DISTRIBUTION``: invokes
+  ``edx.analytics.tasks.answer_dist.AnswerDistributionWorkflow`` via the
+  ``AnswerDistributionWorkflow.groovy`` DSL.
+- ``IMPORT_ENROLLMENTS_INTO_MYSQL``: invokes
+  ``edx.analytics.tasks.enrollments.ImportEnrollmentsIntoMysql`` via the
+  ``ImportEnrollmentsIntoMysql.groovy`` DSL.
+- ``COURSE_ACTIVITY_WEEKLY``: invokes
+  ``edx.analytics.tasks.user_activity.CourseActivityWeeklyTask`` via the
+  ``CourseActivityWeeklyTask.groovy`` DSL.
+- ``INSERT_TO_MYSQL_ALL_VIDEO``: invokes
+  ``edx.analytics.tasks.video.InsertToMysqlAllVideoTask`` via the
+  ``InsertToMysqlAllVideoTask.groovy`` DSL.
+- ``INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY``: invokes
+  ``edx.analytics.tasks.location_per_course.InsertToMysqlCourseEnrollByCountryWorkflow``
+  via the ``InsertToMysqlCourseEnrollByCountryWorkflow.groovy`` DSL.
+
+Since running the analytics tasks on EMR requires confidential ssh keys, the
+convention is to store them in a secure repo, which is then cloned when running
+the seed job. To use a secure repo, override
+``ANALYTICS_SCHEDULE_SECURE_REPO_URL`` and
+``ANALYTICS_SCHEDULE_SECURE_REPO_VERSION``.
+
+For example:
+
+::
+
+    ANALYTICS_SCHEDULE_SECURE_REPO_URL: "git@github.com:open-craft/analytics-sandbox-private.git"
+    ANALYTICS_SCHEDULE_SECURE_REPO_VERSION: "customer-analytics-schedule"
+
+The seed job also clones a second repo, which contains the DSL scripts that
+contain the analytics task DSLs. That repo is configured using
+``ANALYTICS_SCHEDULE_JOBS_DSL_REPO_*``, and it will be cloned directly into the
+seed job workspace.
+
+**Note:** There are two ways to specify an ssh-based github repo URL. Note the
+subtle difference in the paths: ``github.com:your-org`` vs.
+``github.com/your-org``.
+
+- git@github.com:your-org/private-repo.git ✓
+- ssh://git@github.com/your-org/private-repo.git ✓
+
+*Not like this:*
+
+- git@github.com/your-org/private-repo.git ❌
+- ssh://git@github.com:your-org/private-repo.git ❌
+
+The full list of seed job configuration variables is:
+
+- ``ANALYTICS_SCHEDULE_SECURE_REPO_URL``: Optional URL for the git repo that
+  contains the analytics task schedule configuration file. If set, Jenkins
+  will clone this repo when the seed job is run. Default is ``null``.
+- ``ANALYTICS_SCHEDULE_SECURE_REPO_VERSION``: Optional branch/tagname to
+  checkout for the secure repo. Default is ``master``.
+- ``ANALYTICS_SCHEDULE_SECURE_REPO_DEST``: Optional target dir for the
+  secure repo clone, relative to the seed job workspace. Default is
+  ``analytics-secure-config``.
+- ``ANALYTICS_SCHEDULE_SECURE_REPO_CREDENTIAL_ID``: Credential id with read
+  access to the secure repo. Default is
+  ``{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}``. See `Jenkins Credentials`_
+  below for details.
+- ``ANALYTICS_SCHEDULE_JOBS_DSL_REPO_URL``: Optional URL for the git repo that
+  contains the analytics job DSLs. Default is
+  ``git@github.com:edx/jenkins-job-dsl.git``. This repo is cloned
+  directly into the seed job workspace.
+- ``ANALYTICS_SCHEDULE_JOBS_DSL_REPO_VERSION``: Optional branch/tagname to
+  checkout for the job DSL repo. Default is ``master``.
+- ``ANALYTICS_SCHEDULE_JOBS_DSL_REPO_CREDENTIAL_ID``: Credential id with read
+  access to the job DSL repo. Default is
+  ``{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}``. See `Jenkins Credentials`_
+  below for details.
+- ``ANALYTICS_SCHEDULE_JOBS_DSL_CLASSPATH``: Optional additional classpath
+  jars and dirs required to run the job DSLs. Each path must be
+  newline-separated, and relative to the seed job workspace. Default is:
+
+  ::
+
+      src/main/groovy
+      lib/*.jar
+
+- ``ANALYTICS_SCHEDULE_JOBS_DSL_TARGET_JOBS``: DSLs for the top-level seed job
+  to run on build. Default is
+  ``jobs/analytics-edx-jenkins.edx.org/*Jobs.groovy``
+
+- ``ANALYTICS_SCHEDULE_<TASK>``: ``true`` | ``false``. Must be set to
+  ``true`` to create the analytics task.
+- ``ANALYTICS_SCHEDULE_<TASK>_FREQUENCY``: Optional string representing
+  how often the analytics task should be run. Uses a modified cron syntax,
+  e.g. ``@daily``, ``@weekly``, see `stackoverflow`_ for details. Set to empty
+  string to disable cron. Default is different for each analytics task.
+- ``ANALYTICS_SCHEDULE_<TASK>_EXTRA_VARS``: YAML @file location to
+  override the analytics task parameters. File locations can be absolute, or
+  relative to the seed job workspace. You may choose to use raw YAML instead
+  of a @file location, but be aware that any changes made in the Jenkins GUI
+  will be overridden if the ``jenkins_analytics`` ansible role is re-run.
+
+Consult the individual analytics task DSL for details on the options and
+defaults.
+
+For example:
+
+::
+
+    ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION: true
+    ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION_EXTRA_VARS: "@{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/analytics-tasks/answer-dist.yml"
+
+    ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL: true
+    ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS:
+      TASKS_REPO: "https://github.com/open-craft/edx-analytics-pipeline.git"
+      TASKS_BRANCH: "analytics-sandbox"
+      CONFIG_REPO: "https://github.com/open-craft/edx-analytics-configuration.git"
+      CONFIG_BRANCH: "analytics-sandbox"
+      JOB_NAME: "ImportEnrollmentsIntoMysql"
+      JOB_FREQUENCY: "@monthly"
+      CLUSTER_NAME: "AnswerDistribution"
+      EMR_EXTRA_VARS: "@/home/jenkins/emr-vars.yml"  # see "EMR Configuration" below
+      FROM_DATE: "2016-01-01"
+      TASK_USER: "hadoop"
+      NOTIFY_EMAIL_ADDRESSES: "staff@example.com"
+
+EMR Configuration
+.................
+
+The ``EMR_EXTRA_VARS`` parameter for each analytics task is passed by the
+analytics task shell command to the ansible playbook for provisioning and
+terminating the EMR cluster.
+
+Because ``EMR_EXTRA_VARS`` passes via the shell, it may reference other
+analytics task parameters as shell variables, e.g. ``$S3_PACKAGE_BUCKET``.
+
+**File path**
+
+The easiest way to modify this parameter is to provide a ``@/path/to/file.yml``
+or ``@/path/to/file.json``. The file path can be absolute, e.g.,
+
+::
+
+    ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS:
+      EMR_EXTRA_VARS: '@/home/jenkins/emr-vars.yml'
+
+Or relative to the analytics-configuration repo cloned by the analytics task,
+e.g.,
+
+::
+
+    ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS:
+      EMR_EXTRA_VARS: '@./config/emr-vars.yml'
+
+To use a path relative to the analytics task workspace, build an absolute path
+using the ``$WORKSPACE`` variable provided by Jenkins, e.g.,
+
+::
+
+    ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS:
+      EMR_EXTRA_VARS: '@$WORKSPACE/analytics-secure-config/emr-vars.yml'
+
+**Raw JSON**
+
+The other option, utilised by the DSL ``EMR_EXTRA_VARS`` default value, is to
+use a JSON string. Take care to use a *JSON string*, not raw JSON itself, as
+YAML is a JSON superset, and we don't want the JSON to be parsed by ansible.
+
+Also, because formatting valid JSON is difficult, be sure to run the text
+through a JSON validator before deploying (piping it through
+``python -m json.tool``, for example, will catch syntax errors).
+
+As with file paths, the JSON text can use analytics task parameters as shell
+variables, e.g.,
+
+::
+
+    ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS:
+      AUTOMATION_KEYPAIR_NAME: 'analytics-sandbox'
+      VPC_SUBNET_ID: 'subnet-cd1b9c94'
+      EMR_LOG_BUCKET: 's3://analytics-sandbox-emr-logs'
+      CLUSTER_NAME: 'Analytics EMR Cluster'
+      EMR_EXTRA_VARS: |
+        {
+          "name": "$CLUSTER_NAME",
+          "keypair_name": "$AUTOMATION_KEYPAIR_NAME",
+          "vpc_subnet_id": "$VPC_SUBNET_ID",
+          "log_uri": "$EMR_LOG_BUCKET"
+        }
+
+Other useful variables
+----------------------
+
+- ``JENKINS_ANALYTICS_CONCURRENT_JOBS_COUNT``: Configures the number of
+  executors (or concurrent jobs this Jenkins instance can execute). Defaults
+  to ``2``.
+
+General configuration
+=====================
+
+The following variables are used by this role:
+
+Variables used by the command that waits for Jenkins to start up after running
+the ``jenkins_master`` role:
+
+::
+
+    jenkins_connection_retries: 60
+    jenkins_connection_delay: 0.5
+
+Auth realm
+----------
+
+A Jenkins auth realm encapsulates user management in Jenkins, that is:
+
+- What users can log in
+- What credentials they use to log in
+
+The realm type is stored in the ``jenkins_auth_realm.name`` variable.
+
+In the future we will try to enable other auth domains, while preserving the
+ability to run the cli.
+
+Unix Realm
+..........
+
+For now only the ``unix`` realm is supported, which requires every Jenkins
+user to have a shell account on the server.
+
+The unix realm requires the following settings:
+
+- ``service``: Jenkins uses the PAM configuration for this service. ``su`` is
+  a safe choice as it doesn't require a user to have the ability to log in
+  remotely.
+- ``plain_password``: plaintext password; **you must change** the default
+  value.
+
+Example realm configuration:
+
+::
+
+    jenkins_auth_realm:
+      name: unix
+      service: su
+      plain_password: jenkins
+
+Seed job configuration
+----------------------
+
+The seed job is configured in the ``jenkins_seed_job`` variable, which has the
+following attributes:
+
+- ``name``: Name of the job in Jenkins.
+- ``time_trigger``: A Jenkins cron entry defining how often this job should
+  run.
+- ``removed_job_action``: what to do when a job created by a previous run of
+  the seed job is missing from the current run. This can be either ``DELETE``
+  or ``IGNORE``.
+- ``removed_view_action``: what to do when a view created by a previous run of
+  the seed job is missing from the current run. This can be either ``DELETE``
+  or ``IGNORE``.
+- ``scm``: The scm object defines the seed job repository and related
+  settings. It has the following properties:
+- ``scm.type``: It must have the value ``git``.
+- ``scm.url``: URL for the repository.
+- ``scm.credential_id``: Id of a credential to use when authenticating to the
+  repository. This setting is optional. If it is missing or falsy, credentials
+  will be omitted. Please note that when you use an ssh repository url, you'll
+  need to set up a key regardless of whether the repository is public or
+  private (to establish an ssh connection you need a valid public key).
+- ``scm.target_jobs``: A shell glob expression relative to repo root selecting
+  jobs to import.
+- ``scm.additional_classpath``: A path relative to repo root, pointing to a
+  directory that contains additional groovy scripts used by the seed jobs.
+
+Example scm configuration:
+
+::
+
+    jenkins_seed_job:
+      name: seed
+      time_trigger: "H * * * *"
+      removed_job_action: "DELETE"
+      removed_view_action: "IGNORE"
+      scm:
+        type: git
+        url: "git@github.com:edx/jenkins-job-dsl-internal.git"
+        credential_id: "github-deploy-key"
+        target_jobs: "jobs/analytics-edx-jenkins.edx.org/*Jobs.groovy"
+        additional_classpath: "src/main/groovy"
+
+Known issues
+************
+
+1. The ``execute_jenkins_cli.yaml`` playbook should be converted to an
+   Ansible module (it is already used in a module-ish way).
+2. The anonymous user has the discover and get-job permissions, as without
+   them the ``get-job`` and ``build <job>`` commands wouldn't work. Giving
+   anonymous these permissions is a workaround for a transient Jenkins issue
+   (reported a `couple`_ `of`_ `times`_).
+3. We force the unix authentication method -- that is, every user that can
+   log in to Jenkins also needs to have a shell account on the master.
+
+Dependencies
+************
+
+- ``jenkins_master``
+
+.. _Jenkins Security: #jenkins-security
+.. _Jenkins Credentials: #jenkins-credentials
+.. _Jenkins Seed Job Configuration: #jenkins-seed-job-configuration
+.. _Github OAuth plugin: https://wiki.jenkins-ci.org/display/JENKINS/Github+OAuth+Plugin
+.. _Matrix Authorization Strategy: https://wiki.jenkins-ci.org/display/JENKINS/Matrix+Authorization+Strategy+Plugin
+.. _Authorization: #authorization
+.. _GitHub application registration: https://github.com/settings/applications/new
+.. _Security Note: #security-note
+.. _Github third-party application restrictions: https://github.com/organizations/open-craft/settings/oauth_application_policy
+.. _stackoverflow: http://stackoverflow.com/a/12472740
+.. _couple: https://issues.jenkins-ci.org/browse/JENKINS-12543
+.. _of: https://issues.jenkins-ci.org/browse/JENKINS-11024
+..
_times: https://issues.jenkins-ci.org/browse/JENKINS-22143 diff --git a/playbooks/roles/jenkins_analytics/defaults/main.yml b/playbooks/roles/jenkins_analytics/defaults/main.yml new file mode 100644 index 00000000000..0e0ef954a16 --- /dev/null +++ b/playbooks/roles/jenkins_analytics/defaults/main.yml @@ -0,0 +1,300 @@ +--- +# See README.rst for variable descriptions + +JENKINS_ANALYTICS_EXTRA_PKGS: + # Packages required to build edx-analytics-pipeline + - libpq-dev + - libffi-dev + # Packages required to use aws sts assume-role + - jq + + +# Change this default password: (see README.rst to see how you can do it) +JENKINS_ANALYTICS_USER_PASSWORD_PLAIN: jenkins +JENKINS_ANALYTICS_AUTH_REALM: github_oauth +JENKINS_ANALYTICS_AUTH_ADMINISTRATORS: [] +JENKINS_ANALYTICS_AUTH_JOB_BUILDERS: [] +JENKINS_ANALYTICS_AUTH_ANONYMOUS_PERMISSIONS: [] +JENKINS_ANALYTICS_AUTH_ADMINISTRATOR_PERMISSIONS: + - com.cloudbees.plugins.credentials.CredentialsProvider.Create + - com.cloudbees.plugins.credentials.CredentialsProvider.Delete + - com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains + - com.cloudbees.plugins.credentials.CredentialsProvider.Update + - com.cloudbees.plugins.credentials.CredentialsProvider.View + - hudson.model.Computer.Build + - hudson.model.Computer.Configure + - hudson.model.Computer.Connect + - hudson.model.Computer.Create + - hudson.model.Computer.Delete + - hudson.model.Computer.Disconnect + - hudson.model.Hudson.Administer + - hudson.model.Hudson.ConfigureUpdateCenter + - hudson.model.Hudson.Read + - hudson.model.Hudson.RunScripts + - hudson.model.Hudson.UploadPlugins + - hudson.model.Item.Build + - hudson.model.Item.Cancel + - hudson.model.Item.Configure + - hudson.model.Item.Create + - hudson.model.Item.Delete + - hudson.model.Item.Discover + - hudson.model.Item.Move + - hudson.model.Item.Read + - hudson.model.Item.Workspace + - hudson.model.Run.Delete + - hudson.model.Run.Update + - hudson.model.View.Configure + - hudson.model.View.Create + - hudson.model.View.Delete + - hudson.model.View.Read + - hudson.scm.SCM.Tag +JENKINS_ANALYTICS_AUTH_JOB_BUILDER_PERMISSIONS: + - com.cloudbees.plugins.credentials.CredentialsProvider.Create + - com.cloudbees.plugins.credentials.CredentialsProvider.Delete + - com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains + - com.cloudbees.plugins.credentials.CredentialsProvider.Update + - com.cloudbees.plugins.credentials.CredentialsProvider.View + - hudson.model.Hudson.Read + - hudson.model.Hudson.RunScripts + - hudson.model.Item.Build + - hudson.model.Item.Cancel + - hudson.model.Item.Configure + - hudson.model.Item.Create + - hudson.model.Item.Delete + - hudson.model.Item.Discover + - hudson.model.Item.Move + - hudson.model.Item.Read + - hudson.model.Item.Workspace + - hudson.model.Run.Delete + - hudson.model.Run.Update + - hudson.model.View.Configure + - hudson.model.View.Create + - hudson.model.View.Delete + - hudson.model.View.Read + - hudson.scm.SCM.Tag + +JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_ID: null +JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_SECRET: null +JENKINS_ANALYTICS_GITHUB_OAUTH_SCOPES: + - read:org + - user:email +JENKINS_ANALYTICS_GITHUB_OAUTH_WEB_URI: 'https://github.com/' +JENKINS_ANALYTICS_GITHUB_OAUTH_API_URI: 'https://api.github.com/' + +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID: 'github-deploy-key' +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER: 'git' +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE: null +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY: null + +JENKINS_ANALYTICS_CONCURRENT_JOBS_COUNT: 2 
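+
+# Example override for the secure-repo settings below (values taken from the
+# README's sandbox example; set them in your own extra-vars file rather than
+# editing this role's defaults):
+#   ANALYTICS_SCHEDULE_SECURE_REPO_URL: "git@github.com:open-craft/analytics-sandbox-private.git"
+#   ANALYTICS_SCHEDULE_SECURE_REPO_VERSION: "customer-analytics-schedule"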
+ +ANALYTICS_SCHEDULE_SECURE_REPO_URL: null +ANALYTICS_SCHEDULE_SECURE_REPO_DEST: "analytics-secure-config" +ANALYTICS_SCHEDULE_SECURE_REPO_VERSION: "master" +ANALYTICS_SCHEDULE_SECURE_REPO_CREDENTIAL_ID: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}" +ANALYTICS_SCHEDULE_SECURE_REPO_MASTER_SSH_CREDENTIAL_FILE: "aws.pem" +ANALYTICS_SCHEDULE_JOBS_DSL_REPO_URL: "git@github.com:edx/jenkins-job-dsl.git" +ANALYTICS_SCHEDULE_JOBS_DSL_REPO_VERSION: "master" +ANALYTICS_SCHEDULE_JOBS_DSL_REPO_CREDENTIAL_ID: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}" + +ANALYTICS_SCHEDULE_SEED_JOB_NAME: "AnalyticsSeedJob" +ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID: "ssh-access-key" +ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_USER: "hadoop" +ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE: null +ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_FILE: "{{ jenkins_home }}/workspace/{{ ANALYTICS_SCHEDULE_SEED_JOB_NAME }}/{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST }}/{{ ANALYTICS_SCHEDULE_SECURE_REPO_MASTER_SSH_CREDENTIAL_FILE }}" +ANALYTICS_SCHEDULE_JOBS_DSL_CLASSPATH: | + src/main/groovy + lib/*.jar +ANALYTICS_SCHEDULE_JOBS_DSL_TARGET_JOBS: + - jobs/analytics-edx-jenkins.edx.org/*Jobs.groovy + +JENKINS_ANALYTICS_CREDENTIALS: + - id: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_ID }}" + scope: GLOBAL + username: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_USER }}" + type: ssh-private-key + passphrase: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE }}" + description: github access key, generated by ansible + privatekey: "{{ JENKINS_ANALYTICS_GITHUB_CREDENTIAL_KEY }}" + - id: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID }}" + scope: GLOBAL + username: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_USER }}" + type: ssh-private-keyfile + passphrase: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE }}" + description: ssh access key, generated by ansible + privatekey: "{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_FILE }}" + +ANALYTICS_SCHEDULE_COMMON_VARS: '' +ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION: true +ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION_EXTRA_VARS: '' +ANALYTICS_SCHEDULE_COURSE_ACTIVITY_WEEKLY: true +ANALYTICS_SCHEDULE_COURSE_ACTIVITY_WEEKLY_EXTRA_VARS: '' +ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL: true +ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS: '' +ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_ALL_VIDEO: true +ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_ALL_VIDEO_EXTRA_VARS: '' +ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY: true +ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY_EXTRA_VARS: '' + +jenkins_credentials_root: '/tmp/credentials' +jenkins_credentials_file_dest: "{{ jenkins_credentials_root }}/credentials.json" +jenkins_credentials_script: "{{ jenkins_credentials_root }}/addCredentials.groovy" +jenkins_seed_job_root: '{{ jenkins_home }}/workspace/_seedjob' +jenkins_seed_job_script: "{{ jenkins_seed_job_root }}/seedJob.dsl" +jenkins_seed_job_xmlfile: "{{ jenkins_seed_job_root }}/job-dsl-core/{{ jenkins_seed_job.name }}.xml" + +jenkins_connection_retries: 240 +jenkins_connection_delay: 1 + +jenkins_private_keyfile: "{{ jenkins_home }}/.ssh/id_rsa" +jenkins_public_keyfile: "{{ jenkins_private_keyfile }}.pub" +jenkins_admin_users: + - "{{ jenkins_user }}" + +jenkins_auth_realms_available: + none: + name: none + cli_auth: '' + unix: + name: unix + service: su + plain_password: "{{ JENKINS_ANALYTICS_USER_PASSWORD_PLAIN }}" + username: "{{ jenkins_user }}" + cli_auth: '-i {{ jenkins_private_keyfile }}' + github_oauth: + name: github_oauth + webUri: "{{ 
JENKINS_ANALYTICS_GITHUB_OAUTH_WEB_URI }}" + apiUri: "{{ JENKINS_ANALYTICS_GITHUB_OAUTH_API_URI }}" + clientId: "{{ JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_ID }}" + clientSecret: "{{ JENKINS_ANALYTICS_GITHUB_OAUTH_CLIENT_SECRET }}" + oauthScopes: "{{ JENKINS_ANALYTICS_GITHUB_OAUTH_SCOPES }}" + cli_auth: '-i {{ jenkins_private_keyfile }}' + +jenkins_auth_realm: "{{ jenkins_auth_realms_available[JENKINS_ANALYTICS_AUTH_REALM] }}" + +jenkins_auth_users: + anonymous: + - anonymous + administrators: "{{ jenkins_admin_users + JENKINS_ANALYTICS_AUTH_ADMINISTRATORS }}" + job_builders: "{{ JENKINS_ANALYTICS_AUTH_JOB_BUILDERS | default([]) }}" + +jenkins_auth_permissions: + anonymous: "{{ JENKINS_ANALYTICS_AUTH_ANONYMOUS_PERMISSIONS }}" + administrators: "{{ JENKINS_ANALYTICS_AUTH_ADMINISTRATOR_PERMISSIONS }}" + job_builders: "{{ JENKINS_ANALYTICS_AUTH_JOB_BUILDER_PERMISSIONS }}" + +# For now only a single seed job is supported, adding more would require +# Ansible 2.+ or converting _execute_jenkins_cli to a module +jenkins_seed_job: + name: "{{ ANALYTICS_SCHEDULE_SEED_JOB_NAME }}" + multiscm: + - scm: + type: git + url: "{{ ANALYTICS_SCHEDULE_JOBS_DSL_REPO_URL }}" + dest: "" + branch: '\$DSL_BRANCH' + credential_id: "{{ ANALYTICS_SCHEDULE_JOBS_DSL_REPO_CREDENTIAL_ID | default('') }}" + - scm: + type: git + url: "{{ ANALYTICS_SCHEDULE_SECURE_REPO_URL | default('') }}" + dest: "{{ ANALYTICS_SCHEDULE_SECURE_REPO_DEST | default('') }}" + branch: '\$SECURE_BRANCH' + credential_id: "{{ ANALYTICS_SCHEDULE_SECURE_REPO_CREDENTIAL_ID | default('') }}" + analytics_tasks: + - id: ANSWER_DISTRIBUTION + enable: "{{ ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION | default(true) }}" + extra_vars: "{{ ANALYTICS_SCHEDULE_ANSWER_DISTRIBUTION_EXTRA_VARS | default('') }}" + - id: COURSE_ACTIVITY_WEEKLY + enable: "{{ ANALYTICS_SCHEDULE_COURSE_ACTIVITY_WEEKLY | default(true) }}" + extra_vars: "{{ ANALYTICS_SCHEDULE_COURSE_ACTIVITY_WEEKLY_EXTRA_VARS | default('') }}" + - id: IMPORT_ENROLLMENTS_INTO_MYSQL + enable: "{{ ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL | default(true) }}" + extra_vars: "{{ ANALYTICS_SCHEDULE_IMPORT_ENROLLMENTS_INTO_MYSQL_EXTRA_VARS | default('') }}" + - id: INSERT_TO_MYSQL_ALL_VIDEO + enable: "{{ ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_ALL_VIDEO | default(true) }}" + extra_vars: "{{ ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_ALL_VIDEO_EXTRA_VARS | default('') }}" + - id: INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY + enable: "{{ ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY | default(true) }}" + extra_vars: "{{ ANALYTICS_SCHEDULE_INSERT_TO_MYSQL_COURSE_ENROLL_BY_COUNTRY_EXTRA_VARS | default('') }}" + dsl: + gradle_tasks: + - clean + - libs + - test + removed_view_action: IGNORE + removed_job_action: IGNORE + additional_classpath: "{{ ANALYTICS_SCHEDULE_JOBS_DSL_CLASSPATH }}" + target_jobs: "{{ ANALYTICS_SCHEDULE_JOBS_DSL_TARGET_JOBS }}" + +jenkins_analytics_plugins: + - { name: "ansicolor", version: "0.4.1" } + - { name: "ant", version: "1.2" } + - { name: "build-flow-plugin", version: "0.17" } + - { name: "build-flow-test-aggregator", version: "1.0" } + - { name: "build-flow-toolbox-plugin", version: "0.1" } + - { name: "build-name-setter", version: "1.3" } + - { name: "build-pipeline-plugin", version: "1.4" } + - { name: "build-timeout", version: "1.14.1" } + - { name: "build-user-vars-plugin", version: "1.5" } + - { name: "buildgraph-view", version: "1.1.1" } + - { name: "cloudbees-folder", version: "5.2.1" } + - { name: "cobertura", version: "1.9.6" } + - { name: "copyartifact", version: "1.32.1" } + 
- { name: "copy-to-slave", version: "1.4.3" } + - { name: "credentials", version: "2.1.4" } + - { name: "credentials-binding", version: "1.10" } + - { name: "dashboard-view", version: "2.9.1" } + - { name: "ec2", version: "1.28" } + - { name: "envinject", version: "1.92.1" } + - { name: "external-monitor-job", version: "1.4" } + - { name: "ghprb", version: "1.22.4" } + - { name: "git", version: "2.4.0"} + - { name: "git-client", version: "1.19.0"} + - { name: "github", version: "1.14.0" } + - { name: "github-api", version: "1.69" } + - { name: "github-oauth", version: "0.22.3" } + - { name: "github-sqs-plugin", version: "1.5" } + - { name: "gradle", version: "1.24" } + - { name: "grails", version: "1.7" } + - { name: "groovy-postbuild", version: "2.2" } + - { name: "htmlpublisher", version: "1.3" } + - { name: "javadoc", version: "1.3" } + - { name: "jobConfigHistory", version: "2.10" } + - { name: "job-dsl", version: "1.43" } + - { name: "junit", version: "1.3" } + - { name: "ldap", version: "1.11" } + - { name: "mailer", version: "1.16" } + - { name: "mapdb-api", version: "1.0.6.0" } + - { name: "mask-passwords", version: "2.8" } + - { name: "matrix-auth", version: "1.2" } + - { name: "matrix-project", version: "1.4" } + - { name: "monitoring", version: "1.56.0" } + - { name: "multiple-scms", version: "0.5" } + - { name: "nested-view", version: "1.10" } + - { name: "next-build-number", version: "1.0" } + - { name: "node-iterator-api", version: "1.5" } + - { name: "notification", version: "1.5" } + - { name: "pam-auth", version: "1.2" } + - { name: "parameterized-trigger", version: "2.25" } + - { name: "postbuild-task", version: "1.8" } + - { name: "plain-credentials", version: "1.1" } + - { name: "PrioritySorter", version: "2.9" } + - { name: "rebuild", version: "1.25" } + - { name: "sauce-ondemand", version: "1.61" } + - { name: "scm-api", version: "0.2" } + - { name: "script-security", version: "1.12" } + - { name: "s3", version: "0.6" } + - { name: "ssh-agent", version: "1.5" } + - { name: "ssh-credentials", version: "1.11" } + - { name: "ssh-slaves", version: "1.9" } + - { name: "shiningpanda", version: "0.23" } + - { name: "throttle-concurrents", version: "1.9.0" } + - { name: "tmpcleaner", version: "1.1" } + - { name: "token-macro", version: "1.10" } + - { name: "timestamper", version: "1.5.15" } + - { name: "thinBackup", version: "1.7.4" } + - { name: "translation", version: "1.12" } + - { name: "violations", version: "0.7.11" } + - { name: "windows-slaves", version: "1.0" } + - { name: "workflow-step-api", version: "1.14.2" } + - { name: "xunit", version: "1.93"} diff --git a/playbooks/roles/jenkins_analytics/meta/main.yml b/playbooks/roles/jenkins_analytics/meta/main.yml new file mode 100644 index 00000000000..0d1a945ac5b --- /dev/null +++ b/playbooks/roles/jenkins_analytics/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - role: jenkins_master + jenkins_plugins: "{{ jenkins_analytics_plugins }}" diff --git a/playbooks/roles/jenkins_analytics/tasks/execute_jenkins_cli.yaml b/playbooks/roles/jenkins_analytics/tasks/execute_jenkins_cli.yaml new file mode 100644 index 00000000000..5bae3c9ea98 --- /dev/null +++ b/playbooks/roles/jenkins_analytics/tasks/execute_jenkins_cli.yaml @@ -0,0 +1,39 @@ +--- + +- set_fact: + jenkins_cli_root: "/tmp/jenkins-cli/{{ ansible_ssh_user }}" +- set_fact: + jenkins_cli_jar: "{{ jenkins_cli_root }}/jenkins_cli.jar" + +- name: create cli dir + file: name={{ jenkins_cli_root }} state=directory mode="700" + +- name: Wait for Jenkins CLI + uri: + url: 
"http://localhost:{{ jenkins_port }}/cli/" + method: GET + return_content: yes + status_code: 200,403 + register: result + until: (result.status is defined) and ((result.status == 403) or (result.status == 200)) + retries: "{{ jenkins_connection_retries }}" + delay: "{{ jenkins_connection_delay }}" + changed_when: false + +- name: get cli + get_url: + url: "http://localhost:{{ jenkins_port }}/jnlpJars/jenkins-cli.jar" + dest: "{{ jenkins_cli_jar }}" + +- name: execute command + shell: "{{ jenkins_command_prefix|default('') }} java -jar {{ jenkins_cli_jar }} -s http://localhost:{{ jenkins_port }} {{ jenkins_auth_realm.cli_auth }} {{ jenkins_command_string }}" + register: jenkins_command_output + ignore_errors: "{{ jenkins_ignore_cli_errors|default (False) }}" + +- name: "clean up --- remove the credentials dir" + file: + name: jenkins_cli_root + state: absent + +- name: "clean up --- remove cached Jenkins credentials" + command: rm -rf $HOME/.jenkins diff --git a/playbooks/roles/jenkins_analytics/tasks/main.yml b/playbooks/roles/jenkins_analytics/tasks/main.yml new file mode 100644 index 00000000000..228185c749c --- /dev/null +++ b/playbooks/roles/jenkins_analytics/tasks/main.yml @@ -0,0 +1,221 @@ +--- + +- name: install jenkins analytics extra system packages + apt: + pkg={{ item }} state=present update_cache=yes + with_items: "{{ JENKINS_ANALYTICS_EXTRA_PKGS }}" + tags: + - jenkins + +- name: Install httplib2 (required by uri module used in this role) + pip: + name: httplib2 + tags: + - jenkins + +# Jenkins authentication/authorization + +- fail: msg="Please change default password for jenkins user" + when: jenkins_auth_realm.name == "unix" and jenkins_auth_realm.plain_password == jenkins_auth_realm.username + tags: + - jenkins-auth + +- fail: msg="Please change default github oauth client key and secret" + when: jenkins_auth_realm.name == "github_oauth" and ((not jenkins_auth_realm.clientId) or (not jenkins_auth_realm.clientSecret)) + tags: + - jenkins-auth + +- name: generate jenkins user password + shell: "openssl passwd -1 '{{ jenkins_auth_realm.plain_password | quote }}'" + register: jenkins_user_password_hash + no_log: True + when: jenkins_auth_realm.name == "unix" + tags: + - jenkins-auth + +- user: + name: "{{ jenkins_user }}" + state: present + groups: shadow + append: yes + password: '!' 
# locked + update_password: always + generate_ssh_key: yes + ssh_key_file: "{{ jenkins_private_keyfile }}" + tags: + - jenkins-auth + +- user: + name: "{{ jenkins_user }}" + password: "{{ jenkins_user_password_hash.stdout }}" + when: jenkins_auth_realm.name == "unix" + tags: + - jenkins-auth + +- name: template config.xml + template: + src: jenkins.config.main.xml + dest: "{{ jenkins_home }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-auth + +- name: jenkins user config dir + file: + name: "{{ jenkins_home }}/users/{{ jenkins_user }}" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-auth + +- name: template jenkins user config.xml + template: + src: jenkins.user.config.xml + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + force: no # don't overwrite if already there + tags: + - jenkins-auth + +- name: fetch jenkins user public key + shell: "cat {{ jenkins_public_keyfile }}" + register: jenkins_public_key + tags: + - jenkins-auth + +- name: add jenkins user public key + lineinfile: + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + state: present + regexp: "^\\s*" + line: "{{ jenkins_public_key.stdout }}" + tags: + - jenkins-auth + +# Unconditionally restart Jenkins, this has two side-effects: +# 1. Jenkins uses new auth realm +# 2. We guarantee that jenkins is started (this is not certain +# as Jenkins is started by handlers from jenkins_master, +# these handlers are launched after this role). + +- name: restart Jenkins + service: name=jenkins state=restarted + tags: + - jenkins-auth + +# Upload Jenkins credentials + +- name: create credentials dir + file: name={{ jenkins_credentials_root }} state=directory + tags: + - jenkins-credentials + +- name: upload groovy credentials script + template: + src: addCredentials.groovy + dest: "{{ jenkins_credentials_script }}" + mode: "600" + tags: + - jenkins-credentials + +- name: upload credentials file + template: + src: credentials_file.json.j2 + dest: "{{ jenkins_credentials_file_dest }}" + mode: "600" + owner: "{{ jenkins_user }}" + tags: + - jenkins-credentials + +- name: add credentials + include: execute_jenkins_cli.yaml + vars: + jenkins_command_string: "groovy {{ jenkins_credentials_script }}" + tags: + - jenkins-credentials + +- name: clean up + file: name={{ jenkins_credentials_root }} state=absent + tags: + - jenkins-credentials + +# Upload seed job + +- name: create seed job dir + file: name={{ jenkins_seed_job_root }} state=directory owner={{ jenkins_user }} group={{ jenkins_group }} + tags: + - jenkins-seed-job + +- name: clone job-dsl-plugin repo + git: + repo: https://github.com/jenkinsci/job-dsl-plugin.git + depth: 1 + dest: "{{ jenkins_seed_job_root }}" + become: yes + become_user: "{{ jenkins_user }}" + tags: + - jenkins-seed-job + +- name: upload groovy seed job script + template: + src: seedJob.groovy + dest: "{{ jenkins_seed_job_script }}" + mode: "600" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-seed-job + +- name: generate seed job xml + shell: "GRADLE_OPTS=\"-Dorg.gradle.daemon=true\" ./gradlew run -Pargs={{ jenkins_seed_job_script }}" + args: + chdir: "{{ jenkins_seed_job_root }}" + become: yes + become_user: "{{ jenkins_user }}" + tags: + - jenkins-seed-job + +- name: check if job is present + include: execute_jenkins_cli.yaml + vars: + jenkins_command_string: "get-job {{ jenkins_seed_job.name 
}}" + jenkins_ignore_cli_errors: yes + tags: + - jenkins-seed-job + +- set_fact: + get_job_output: "{{ jenkins_command_output }}" + tags: + - jenkins-seed-job + +# Upload seed job to Jenkins + +- name: Create seed job if absent + include: execute_jenkins_cli.yaml + vars: + jenkins_command_string: "create-job {{ jenkins_seed_job.name }}" + jenkins_command_prefix: "cat {{ jenkins_seed_job_xmlfile }} | " + when: get_job_output.rc != 0 + tags: + - jenkins-seed-job + +- name: update seed job + include: execute_jenkins_cli.yaml + vars: + jenkins_command_string: "update-job {{ jenkins_seed_job.name }}" + jenkins_command_prefix: "cat {{ jenkins_seed_job_xmlfile }} | " + when: get_job_output.rc == 0 + tags: + - jenkins-seed-job + +# Build the seed job + +- name: Build the seed job + include: execute_jenkins_cli.yaml + vars: + jenkins_command_string: "build {{ jenkins_seed_job.name }} -s" + tags: + - jenkins-seed-job diff --git a/playbooks/roles/jenkins_analytics/templates/addCredentials.groovy b/playbooks/roles/jenkins_analytics/templates/addCredentials.groovy new file mode 100644 index 00000000000..1d6a86ecc7b --- /dev/null +++ b/playbooks/roles/jenkins_analytics/templates/addCredentials.groovy @@ -0,0 +1,104 @@ +/** + * This script can be run via the Jenkins CLI as follows: + * + * java -jar /var/jenkins/war/WEB-INF/jenkins-cli.jar -s http://localhost:8080 groovy addCredentials.groovy + * + * For a given json file, this script will create a set of credentials. + * The script can be run safely multiple times and it will update each changed credential + * (deleting credentials is not currently supported). + * + * This is useful in conjunction with the job-dsl to bootstrap a barebone Jenkins instance. + * + * This script will currently fail if the plugins it requires have not been installed: + * + * credentials-plugin + * credentials-ssh-plugin + */ + + +import com.cloudbees.plugins.credentials.Credentials +import com.cloudbees.plugins.credentials.CredentialsScope +import com.cloudbees.plugins.credentials.common.IdCredentials +import com.cloudbees.plugins.credentials.domains.Domain +import hudson.model.* +import com.cloudbees.plugins.credentials.SystemCredentialsProvider +import com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl +import com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey +import groovy.json.JsonSlurper; + +boolean addUsernamePassword(scope, id, username, password, description) { + provider = SystemCredentialsProvider.getInstance() + provider.getCredentials().add(new UsernamePasswordCredentialsImpl(scope, id, description, username, password)) + provider.save() + return true +} + +boolean addSSHUserPrivateKey(scope, id, username, privateKey, passphrase, description) { + provider = SystemCredentialsProvider.getInstance() + source = new BasicSSHUserPrivateKey.DirectEntryPrivateKeySource(privateKey) + provider.getCredentials().add(new BasicSSHUserPrivateKey(scope, id, username, source, passphrase, description)) + provider.save() + return true +} + +boolean addSSHUserPrivateKeyFile(scope, id, username, privateKey, passphrase, description) { + provider = SystemCredentialsProvider.getInstance() + source = new BasicSSHUserPrivateKey.FileOnMasterPrivateKeySource(privateKey) + provider.getCredentials().add(new BasicSSHUserPrivateKey(scope, id, username, source, passphrase, description)) + provider.save() + return true +} + +def jsonFile = new File("{{ jenkins_credentials_file_dest }}"); + +if (!jsonFile.exists()){ + throw RuntimeException("Credentials 
file does not exist on remote host"); +} + +def jsonSlurper = new JsonSlurper() +def credentialList = jsonSlurper.parse(new FileReader(jsonFile)) + +credentialList.each { credential -> + + if (credential.scope != "GLOBAL"){ + throw new RuntimeException("Sorry for now only global scope is supported"); + } + + scope = CredentialsScope.valueOf(credential.scope) + + def provider = SystemCredentialsProvider.getInstance(); + + def toRemove = []; + + for (Credentials current_credentials: provider.getCredentials()){ + if (current_credentials instanceof IdCredentials){ + if (current_credentials.getId() == credential.id){ + toRemove.add(current_credentials); + } + } + } + + toRemove.each {curr ->provider.getCredentials().remove(curr)}; + + if (credential.type == "username-password") { + addUsernamePassword(scope, credential.id, credential.username, credential.password, credential.description) + } + + if (credential.type == "ssh-private-key") { + + if (credential.passphrase != null && credential.passphrase.trim().length() == 0){ + credential.passphrase = null; + } + + addSSHUserPrivateKey(scope, credential.id, credential.username, credential.privatekey, credential.passphrase, credential.description) + } + + if (credential.type == "ssh-private-keyfile") { + + if (credential.passphrase != null && credential.passphrase.trim().length() == 0){ + credential.passphrase = null; + } + + addSSHUserPrivateKeyFile(scope, credential.id, credential.username, credential.privatekey, credential.passphrase, credential.description) + } +} diff --git a/playbooks/roles/jenkins_analytics/templates/credentials_file.json.j2 b/playbooks/roles/jenkins_analytics/templates/credentials_file.json.j2 new file mode 100644 index 00000000000..19ef17d5b46 --- /dev/null +++ b/playbooks/roles/jenkins_analytics/templates/credentials_file.json.j2 @@ -0,0 +1 @@ +{{ JENKINS_ANALYTICS_CREDENTIALS|to_json }} \ No newline at end of file diff --git a/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml b/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml new file mode 100644 index 00000000000..eddc5d0dacd --- /dev/null +++ b/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml @@ -0,0 +1,60 @@ + + + + 1.638 + {{ JENKINS_ANALYTICS_CONCURRENT_JOBS_COUNT }} + NORMAL + true + {% if jenkins_auth_realm.name == "none" %} + + + {% else %} + + {% for permission_group, permissions in jenkins_auth_permissions.items() %} + {% for permission in permissions %} + {% for user in jenkins_auth_users[permission_group] | default([]) %} + {{ permission }}:{{ user }} + {% endfor %} + {% endfor %} + {% endfor %} + + {% if jenkins_auth_realm.name == "unix" %} + + {{ jenkins_auth_realm.service }} + + {% elif jenkins_auth_realm.name == "github_oauth" %} + + {{ jenkins_auth_realm.webUri }} + {{ jenkins_auth_realm.apiUri }} + {{ jenkins_auth_realm.clientId }} + {{ jenkins_auth_realm.clientSecret }} + {{ jenkins_auth_realm.oauthScopes|join(',') }} + + {% endif %} + {% endif %} + false + + ${JENKINS_HOME}/workspace/${ITEM_FULLNAME} + ${ITEM_ROOTDIR}/builds + + + + + + 5 + 0 + + + + All + false + false + + + + All + 0 + + + + diff --git a/playbooks/roles/jenkins_analytics/templates/jenkins.user.config.xml b/playbooks/roles/jenkins_analytics/templates/jenkins.user.config.xml new file mode 100644 index 00000000000..b1e892fd83c --- /dev/null +++ b/playbooks/roles/jenkins_analytics/templates/jenkins.user.config.xml @@ -0,0 +1,53 @@ + + + {{ jenkins_user }} + + + + + + + + + + + + + + + + + + + + + edx + shadow + jenkins + 
authenticated + + 1457073573763 + + + + + + + All + false + false + + + + + + + + + + + + false + + + diff --git a/playbooks/roles/jenkins_analytics/templates/seedJob.groovy b/playbooks/roles/jenkins_analytics/templates/seedJob.groovy new file mode 100644 index 00000000000..0dd4576684c --- /dev/null +++ b/playbooks/roles/jenkins_analytics/templates/seedJob.groovy @@ -0,0 +1,81 @@ +/* + Jenkins Analytics Seed Job DSL template + */ + +job('{{ jenkins_seed_job.name }}') { + + description('Creates and configures the analytics task jobs.') + + multiscm { + {% for scm in jenkins_seed_job.multiscm %} + {% if scm.url %} + git { + remote { + url('http://github.com/%7B%7B%20scm.url%20%7D%7D') + branch("{{ scm.branch | default('master') }}") + {% if scm.credential_id %} + credentials('{{ scm.credential_id }}') + {% endif %} + } + extensions { + {% if scm.dest %} + relativeTargetDirectory('{{ scm.dest }}') + {% endif %} + cleanAfterCheckout() + pruneBranches() + } + } + {% endif %} + {% endfor %} + } + parameters { + credentialsParam('MASTER_SSH_CREDENTIAL_ID', { + defaultValue('{{ ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_ID | default("") }}') + description('Jenkins Credential with ssh access to EMR resources.') + }) + credentialsParam('GIT_CREDENTIAL_ID', { + defaultValue('{{ ANALYTICS_SCHEDULE_SECURE_REPO_CREDENTIAL_ID | default("") }}') + description('Jenkins Credential with read access to the secure git repos.') + }) + stringParam('DSL_BRANCH', "{{ ANALYTICS_SCHEDULE_JOBS_DSL_REPO_VERSION | default('master') }}", + 'Branch or version of the DSL repo to checkout and use to generate the jobs.') + stringParam('SECURE_BRANCH', "{{ ANALYTICS_SCHEDULE_SECURE_REPO_VERSION | default('master') }}", + 'Branch or version of the secure repo to checkout and use to generate the jobs.') + textParam('COMMON_VARS', "{{ ANALYTICS_SCHEDULE_COMMON_VARS | default('') }}", + 'Set default values for the common job parameters. Format as YAML or provide YAML file as @path/to/file.yml, ' + + ' absolute or relative to seed job workspace.') + {% for task in jenkins_seed_job.analytics_tasks %} + booleanParam('{{ task.id }}', + {{ task.enable | ternary('true', 'false') }}, + 'Create or update this analytics task job.' + ) + textParam('{{ task.id }}_EXTRA_VARS', + "{{ task.extra_vars }}", + 'Default values for the analytics task job parameters. Format as YAML, or provide YAML file as @path/to/file.yml, absolute or relative to seed job workspace.'
+ ) + {% endfor %} + } + steps { + gradle { + useWrapper(true) + makeExecutable(false) + {% for task in jenkins_seed_job.dsl.gradle_tasks %} + tasks('{{ task }}') + {% endfor %} + } + dsl { + removeAction('{{ jenkins_seed_job.dsl.removed_job_action }}') + removeViewAction('{{ jenkins_seed_job.dsl.removed_view_action }}') + additionalClasspath($/{{ jenkins_seed_job.dsl.additional_classpath }}/$) + lookupStrategy('SEED_JOB') + {% for job in jenkins_seed_job.dsl.target_jobs %} + external('{{ job }}') + {% endfor %} + } + } + keepDependencies(false) + disabled(false) + configure { project -> + canRoam(true) + } +} diff --git a/playbooks/roles/jenkins_common/defaults/main.yml b/playbooks/roles/jenkins_common/defaults/main.yml new file mode 100644 index 00000000000..530de2bd429 --- /dev/null +++ b/playbooks/roles/jenkins_common/defaults/main.yml @@ -0,0 +1,305 @@ +jenkins_common_user: jenkins +jenkins_common_group: jenkins +jenkins_common_groups: '{{ jenkins_common_group }}' +jenkins_common_home: /var/lib/jenkins +jenkins_common_config_path: '{{ jenkins_common_home }}/init-configs' +jenkins_common_port: 8080 +JENKINS_COMMON_VERSION: jenkins_1.651.3 +jenkins_common_war_source: https://s3.amazonaws.com/edx-testeng-tools/jenkins +jenkins_common_nginx_port: 80 +jenkins_common_protocol_https: true +# When checking if Jenkins is finished initializing, expect a 200 as it should +# be publicly available +jenkins_common_ready_status_code: 200 +# Always start Jenkins in Quiet/Maintenance mode +start_jenkins_in_quiet_mode: true + +JENKINS_SERVER_NAME: jenkins.example.org +jenkins_node_usage: EXCLUSIVE + +jenkins_common_debian_pkgs: + - nginx + - git + - curl + - maven + - daemon + - psmisc + - software-properties-common + +jenkins_common_snap_pkgs: [] + +jenkins_common_python_versions: [] + +jenkins_common_configuration_git_url: https://github.com/edx/jenkins-configuration.git +jenkins_common_jenkins_configuration_branch: master +jenkins_common_configuration_src_path: src/main/groovy +jenkins_common_git_home: '{{ jenkins_common_home }}/git' + +jenkins_common_configuration_scripts: [] +jenkins_common_non_plugin_template_files: + - credentials + - ec2_config + - email_ext_config + - ghprb_config + - git_config + - github_config + - github_oauth + - groovy_config + - job_config_history + - log_config + - mailer_config + - main_config + - mask_passwords_config + - properties_config + - python_config + - security + - seed_config + - slack_config + - splunk_config + - timestamper_config + +# Jenkins default config values +jenkins_common_jvm_args: '' + +# Users +JENKINS_USER_LIST: [] + +# main +jenkins_common_main_system_message: '' +jenkins_common_main_num_executors: 1 +jenkins_common_main_labels: + - 'dsl-seed-runner' + - 'backup-runner' +jenkins_common_main_quiet_period: 5 +jenkins_common_main_scm_retry: 2 +jenkins_common_main_disable_remember: true +jenkins_common_main_env_vars: + - NAME: 'GITHUB_OWNER_WHITELIST' + VALUE: '{{ JENKINS_MAIN_GITHUB_OWNER_WHITELIST }}' +jenkins_common_main_executable: '/bin/bash' +jenkins_common_formatter_type: 'rawhtml' +jenkins_common_disable_syntax_highlighting: false + +# system properties +jenkins_common_system_properties: + - KEY: "hudson.footerURL" + VALUE: "http://www.example.com/" + +JENKINS_MAIN_URL: 'https://jenkins.example.org/' +JENKINS_MAIN_ADMIN_EMAIL: 'jenkins ' + +# global tool configurations +jenkins_common_groovy_installations: + - NAME: 'DEFAULT_GROOVY' + HOME: '' + VERSION: '2.5.1' +jenkins_common_python_installations: + - PYTHON_ALIAS: 
'System-CPython-2.7' + PYTHON_PATH: '/usr/bin/python2.7' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.5' + PYTHON_PATH: '/usr/bin/python3.5' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.6' + PYTHON_PATH: '/usr/bin/python3.6' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.8' + PYTHON_PATH: '/usr/bin/python3.8' + PYTHON_PROPERTIES: [] + +# plugins +jenkins_common_plugins_list: [] + +# ec2 +jenkins_common_use_instance_profile_for_creds: false +jenkins_common_instance_cap: '' + +JENKINS_EC2_PRIVATE_KEY: '' +JENKINS_EC2_REGION: '' +JENKINS_EC2_CREDENTIAL_ID: '' +JENKINS_EC2_ROLE_ARN: '' +JENKINS_EC2_ROLE_SESSION_NAME: '' +JENKINS_EC2_AMIS: [] + +# ghprb +jenkins_common_ghprb_server: 'https://api.github.com/' + +jenkins_common_ghprb_request_testing: '' +jenkins_common_ghprb_white_list_phrase: '' +jenkins_common_ghprb_ok_phrase: '' +jenkins_common_ghprb_retest_phrase: '' +jenkins_common_ghprb_skip_phrase: '' +jenkins_common_ghprb_cron_schedule: '' + +jenkins_common_ghprb_use_comments: false +jenkins_common_ghprb_use_detailed_comments: false +jenkins_common_ghprb_manage_webhooks: false +jenkins_common_ghprb_failure_as: 'failure' +jenkins_common_ghprb_auto_close_fails: false +jenkins_commmon_ghprb_display_errors: false + +jenkins_common_ghprb_github_auth: '' +jenkins_common_ghprb_cancel_build_on_update: true +jenkins_common_ghprb_simple_status: '' +jenkins_common_ghprb_publish_jenkins_url: '' +jenkins_common_ghprb_build_log_lines: +jenkins_common_ghprb_results: + - STATUS: 'FAILURE' + MESSAGE: 'Test FAILed.' + - STATUS: 'SUCCESS' + MESSAGE: 'Test PASSed.' +JENKINS_GHPRB_ADMIN_LIST: [] +JENKINS_GHPRB_CREDENTIAL_ID: '' +JENKINS_GHPRB_SHARED_SECRET: '' +JENKINS_GHPRB_BLACK_LIST: [] +JENKINS_GHPRB_WHITE_LIST: [] +JENKINS_MAIN_GITHUB_OWNER_WHITELIST: [] +JENKINS_GHPRB_BLACK_LIST_AUTHORS: '' + +# credentials +JENKINS_SECRET_FILES_LIST: [] +JENKINS_USERNAME_PASSWORD_LIST: [] +JENKINS_SECRET_TEXT_LIST: [] +JENKINS_CERTIFICATES_LIST: [] +JENKINS_SSH_LIST: [] +JENKINS_AWS_LIST: [] + +# security +jenkins_common_dsl_script_security_enabled: true +jenkins_common_security_agent_protocols: + - 'JNLP4-connect' +jenkins_common_security_agent_jnlp_tcp_port: 0 +JENKINS_CSRF_PROTECTION_ENABLED: false +# proxy compatibility will exclude the client ip from crumbs. 
+# this is useful if dealing with a proxy that filters them +# in requests +JENKINS_CSRF_PROXY_COMPATIBILITY: false + +JENKINS_SECURITY_GROUPS: [] + +# git +JENKINS_GIT_NAME: 'jenkins' +JENKINS_GIT_EMAIL: 'jenkins@example.com' + +# github +jenkins_common_github_configs: + - CREDENTIAL_ID: '' + MANAGE_HOOKS: false + USE_CUSTOM_API_URL: false + GITHUB_API_URL: '' + CACHE_SIZE: 20 + +# github oauth settings +jenkins_common_security_scopes: 'read:org,user:email' + +JENKINS_SECURITY_CLIENT_ID: '' +JENKINS_SECURITY_CLIENT_SECRET: '' + +# seed +jenkins_common_seed_name: 'seed_job' +jenkins_common_seed_path: '{{ jenkins_common_config_path }}/xml/seed_job.xml' +jenkins_common_seed_job_source: '{{ role_path }}/files/xml/*' + +# logs +jenkins_common_log_list: + - LOG_RECORDER: 'Sample Log' + LOGGERS: + - name: 'org.jenkinsci.plugins.example.Class' + log_level: 'ALL' + +# job config history +jenkins_common_history_root: '' +jenkins_common_history_max_entries: '' +jenkins_common_history_max_days: '' +jenkins_common_history_max_entries_page: '' +jenkins_common_history_skip_duplicates: true +jenkins_common_history_exclude_pattern: '' +jenkins_common_history_save_module_config: false +jenkins_common_history_show_build_badges: 'always' +jenkins_common_history_excluded_users: '' + +# mailer +jenkins_common_mailer_port: 465 +jenkins_common_mailer_use_ssl: true +jenkins_common_mailer_char_set: 'UTF-8' +JENKINS_MAILER_SMTP_SERVER: '' +JENKINS_MAILER_REPLY_TO_ADDRESS: 'jenkins' +JENKINS_MAILER_DEFAULT_SUFFIX: '@example.com' +JENKINS_MAILER_SMTP_AUTH_USERNAME: '' +JENKINS_MAILER_SMTP_AUTH_PASSWORD: '' + +# email ext plugin +jenkins_common_email_advanced_properties: '' +jenkins_common_email_content_type: 'text/plain' +jenkins_common_default_email_subject: '${PROJECT_NAME} #${BUILD_NUMBER} is ${BUILD_STATUS}' +jenkins_common_email_emergency_reroute: '' +jenkins_common_email_replyto: '' +jenkins_common_email_debug_mode: 'false' +jenkins_common_email_max_attachment_size: 10 +jenkins_common_email_default_recipients: '' +jenkins_common_email_add_precedence_bulk: 'true' +jenkins_common_email_allowed_domains: '' +jenkins_common_email_excluded_committers: '' +jenkins_common_email_require_admin_for_template_testing: 'true' +jenkins_common_email_watching_enabled: '' +jenkins_common_email_allow_unregistered_enabled: '' +jenkins_common_email_use_list_id: '' +jenkins_common_email_list_id: '' +jenkins_common_email_triggers: + - 'AbortedTrigger' + - 'FailureTrigger' + - 'FixedTrigger' +# if you wish to set the following 3 values, supply paths to +# individual files with the content you want to specify +jenkins_common_email_default_body_path: '' +jenkins_common_email_default_presend_script_path: '' +jenkins_common_email_default_postsend_script_path: '' + +# mask passwords +JENKINS_MASK_PASSWORDS_CLASSES: [] +JENKINS_MASK_PASSWORDS_PAIRS: [] + +# SAML +JENKINS_SAML_IDP_METADATA: | + dummy data +JENKINS_SAML_DISPLAY_NAME_ATTRIBUTE: '' +JENKINS_SAML_GROUP_ATTRIBUTE: '' +JENKINS_SAML_MAX_AUTH_LIFETIME_SECONDS: 60 +JENKINS_SAML_USERNAME_ATTRIBUTE: '' +JENKINS_SAML_EMAIL_ATTRIBUTE: '' +JENKINS_SAML_LOGOUT_URL: '' +JENKINS_SAML_ADVANCED_CONFIGURATION: {} +JENKINS_SAML_ENCRYPTION_DATA: {} +JENKINS_SAML_USERNAME_CASE_CONVENTION: 'None' +JENKINS_SAML_BINDING: 'POST' +JENKINS_SAML_CUSTOM_ATTRIBUTES: [] + +# SLACK +JENKINS_SLACK_BASE_URL: '' +JENKINS_IS_SLACK_BOT: 'true' +JENKINS_SLACK_ROOM: '' +JENKINS_SLACK_TEAM_DOMAIN: '' +JENKINS_SLACK_CREDENTIAL_ID: '' + +# splunk +jenkins_common_splunk_enabled: true +jenkins_common_splunk_use_ssl:
true +jenkins_common_splunk_raw_events_enabled: true +jenkins_common_splunk_batch_size: 4096 +jenkins_common_splunk_retries: 3 +jenkins_common_splunk_ignored_jobs: '' +jenkins_common_splunk_master_hostname: '' +jenkins_common_splunk_event_source: '' +jenkins_common_splunk_script_type: 'path' +jenkins_common_splunk_script_path: '' +jenkins_common_splunk_file_path: '{{ role_path }}/files/splunk/*' +jenkins_common_splunk_metadata: [] +JENKINS_SPLUNK_HOSTNAME: '' +JENKINS_SPLUNK_PORT: 8088 +JENKINS_SPLUNK_APP_URL: '' +JENKINS_SPLUNK_TOKEN: '' + +# timestamper +jenkins_common_timestamper_system_clock_format: "''HH:mm:ss' '" +jenkins_common_timestamper_elapsed_time_format: "''HH:mm:ss.S' '" +jenkins_common_timestamper_enabled_on_pipelines: true diff --git a/playbooks/roles/jenkins_common/files/ec2/mongo_init_script.sh b/playbooks/roles/jenkins_common/files/ec2/mongo_init_script.sh new file mode 100644 index 00000000000..7f39cb61833 --- /dev/null +++ b/playbooks/roles/jenkins_common/files/ec2/mongo_init_script.sh @@ -0,0 +1,16 @@ +# This confirms that mongo is running and is accessible on localhost +# It could expose internal network problems, in which case the worker should not be used +# Mongo seems to spend a bit of time starting. +i=0 + +while [ $i -lt 45 ]; do + mongo --quiet --eval 'db.getMongo().getDBNames()' 2>/dev/null 1>&2 + if [ $? -eq 0 ]; then + break + else + sleep 2 + i=$((i+1)) + fi +done + +mongo --quiet --eval 'db.getMongo().getDBNames()' diff --git a/playbooks/roles/jenkins_common/files/splunk/splunk.groovy b/playbooks/roles/jenkins_common/files/splunk/splunk.groovy new file mode 100644 index 00000000000..ac3de9ef2fb --- /dev/null +++ b/playbooks/roles/jenkins_common/files/splunk/splunk.groovy @@ -0,0 +1,2 @@ +//send job metadata and junit reports with page size set to 50 (each event contains max 50 test cases) +splunkins.sendTestReport(50) diff --git a/playbooks/roles/jenkins_common/files/xml/seed_job.xml b/playbooks/roles/jenkins_common/files/xml/seed_job.xml new file mode 100644 index 00000000000..018e113ee14 --- /dev/null +++ b/playbooks/roles/jenkins_common/files/xml/seed_job.xml @@ -0,0 +1,98 @@ + + + + Run one dsl job at a time. + false + + + + -1 + 20 + -1 + -1 + + + + false + -1 + + + false + false + + + + + DSL_SCRIPT + Path to dsl script to run, from the root of the https://github.com/edx/jenkins-job-dsl repo (i.e. sample/jobs/sampleJob.groovy) + sample/jobs/sampleJob.groovy + + + BRANCH + Branch of jenkins-job-dsl repo to use + */master + + + + + + 2 + + + https://github.com/edx/jenkins-job-dsl.git + + + + + ${BRANCH} + + + false + + + + dsl-seed-runner + false + false + false + false + + false + + + #!/usr/bin/env bash + + # exit if user-supplied parameter does not exist + if [ ! -e ${DSL_SCRIPT} ]; then + echo "DSL Script '${DSL_SCRIPT}' does not exist.
Please try again" + exit 1 + fi + + + + tert + + libs +assemble + + + (Default) + true + true + true + true + + + ${DSL_SCRIPT} + false + false + IGNORE + IGNORE + JENKINS_ROOT + lib/snakeyaml-1.17.jar +src/main/groovy + + + + + diff --git a/playbooks/roles/jenkins_common/meta/main.yml b/playbooks/roles/jenkins_common/meta/main.yml new file mode 100644 index 00000000000..cc207a01175 --- /dev/null +++ b/playbooks/roles/jenkins_common/meta/main.yml @@ -0,0 +1,21 @@ +--- +dependencies: + - common + - role: nginx + nginx_app_dir: "/etc/nginx" + nginx_log_dir: "/var/log/nginx" + nginx_data_dir: "{{ nginx_app_dir }}" + nginx_conf_dir: "{{ nginx_app_dir }}/conf.d" + nginx_sites_available_dir: "{{ nginx_app_dir }}/sites-available" + nginx_sites_enabled_dir: "{{ nginx_app_dir }}/sites-enabled" + nginx_server_static_dir: "{{ nginx_data_dir }}/server-static" + nginx_htpasswd_file: "{{ nginx_app_dir }}/nginx.htpasswd" + nginx_default_sites: "jenkins" + nginx_sites: jenkins + jenkins_nginx_port: "{{ jenkins_common_nginx_port }}" + jenkins_server_name: "{{ JENKINS_SERVER_NAME }}" + jenkins_port: "{{ jenkins_common_port }}" + jenkins_protocol_https: "{{ jenkins_common_protocol_https }}" + tags: jenkins:promote-to-production + - role: oraclejdk + tags: java diff --git a/playbooks/roles/jenkins_common/tasks/main.yml b/playbooks/roles/jenkins_common/tasks/main.yml new file mode 100644 index 00000000000..417193e4c61 --- /dev/null +++ b/playbooks/roles/jenkins_common/tasks/main.yml @@ -0,0 +1,466 @@ +--- +- name: Install jenkins specific system packages + apt: + name: '{{ item }}' + state: present + update_cache: yes + with_items: '{{ jenkins_common_debian_pkgs }}' + tags: + - jenkins + - install + - install:system-requirements + +- name: Install jenkins specific snap packages + command: 'snap install {{ item }} --classic' + with_items: '{{ jenkins_common_snap_pkgs }}' + tags: + - jenkins + - install + - install:system-requirements + +# Certain versions of Python are not available as apt-packages, +# depending on your distribution. 
Use the deadsnakes PPA to have +# access to them +- name: add deadsnakes PPA for newer Python versions + apt_repository: + repo: "ppa:deadsnakes/ppa" + update_cache: yes + tags: + - jenkins + - install + - install:system-requirements + +# Install newer versions of python for testing, but do not set them +# as the default version +- name: Install specific versions of python + apt: + name: '{{ item }}' + state: present + update_cache: yes + with_items: '{{ jenkins_common_python_versions }}' + tags: + - jenkins + - install + - install:system-requirements + +# Install 'dev' packages for each version of python that is installed +- name: Install python dev packages + apt: + name: '{{ item }}-dev' + state: present + update_cache: yes + with_items: '{{ jenkins_common_python_versions }}' + tags: + - jenkins + - install + - install:system-requirements + +- name: Install extra packages required for python3.8 + apt: + name: '{{ item }}' + state: present + update_cache: yes + when: '"python3.8" in jenkins_common_python_versions' + with_list: + - 'python3.8-lib2to3' + - 'python3.8-distutils' + tags: + - jenkins + - install + - install:system-requirements + +- name: Create jenkins group with specified gid + group: + name: '{{ jenkins_common_group }}' + gid: '{{ jenkins_common_group_gid }}' + state: present + when: jenkins_common_group_gid is defined + tags: + - install + - install:system-requirements + +- name: Create jenkins group + group: + name: '{{ jenkins_common_group }}' + state: present + when: jenkins_common_group_gid is not defined or not jenkins_common_group_gid + tags: + - install + - install:system-requirements + +- name: Create the jenkins user with specified uid and add to the group + user: + name: '{{ jenkins_common_user }}' + append: yes + uid: '{{ jenkins_common_user_uid }}' + groups: '{{ jenkins_common_groups }}' + when: jenkins_common_user_uid is defined + tags: + - install + - install:system-requirements + +- name: Create the jenkins user and add to the group + user: + name: '{{ jenkins_common_user }}' + append: yes + groups: '{{ jenkins_common_groups }}' + when: jenkins_common_user_uid is not defined or not jenkins_common_user_uid + tags: + - install + - install:system-requirements + +- name: Create jenkins home and set ownership + file: + path: "{{ jenkins_common_home }}" + state: directory + owner: "{{ jenkins_common_user }}" + group: "{{ jenkins_common_group }}" + mode: 0700 + tags: + - install + - install:system-requirements + +- name: set nofile soft limit for the user jenkins + pam_limits: + domain: "{{ jenkins_common_user }}" + limit_type: soft + limit_item: nofile + value: 4096 + tags: + - install + - install:system-requirements + +- name: set nofile hard limit for the user jenkins + pam_limits: + domain: "{{ jenkins_common_user }}" + limit_type: hard + limit_item: nofile + value: 8096 + tags: + - install + - install:system-requirements + +- name: Create /var/run/jenkins + file: + path: "/var/run/jenkins" + state: directory + owner: "{{ jenkins_common_user }}" + group: "{{ jenkins_common_group }}" + tags: + - install + - install:system-requirements + +- name: Delete any existing jenkins-configuration folders to avoid unwanted configuration + file: + path: '{{ item }}' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + state: absent + with_items: + - '{{ jenkins_common_home }}/init.groovy.d' + - '{{ jenkins_common_home }}/plugins' + - '{{ jenkins_common_home }}/utils' + - '{{ jenkins_common_config_path }}' + tags: + - install + - install:base + - 
install:plugins + - jenkins:local-dev + +- name: Create necessary folders + file: + path: '{{ item }}' + state: directory + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_items: + - /usr/share/jenkins + - '{{ jenkins_common_home }}/init.groovy.d' + - '{{ jenkins_common_config_path }}' + - '{{ jenkins_common_home }}/utils' + - '{{ jenkins_common_home }}/plugins' + - '{{ jenkins_common_git_home }}' + - /var/log/jenkins + - /var/cache/jenkins + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Repository Signing Key for Jenkins 2.235.3 + apt_key: + url: "https://pkg.jenkins.io/debian-stable/jenkins.io.key" + state: present + tags: + - install + - install:app-requirements + become: yes + +- name: Download Jenkins war file + get_url: + url: '{{ jenkins_common_war_source }}/{{ JENKINS_COMMON_VERSION }}.war' + dest: /usr/share/jenkins/jenkins.war + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + force: yes + tags: + - install + - install:app-requirements + +- name: Add Jenkins systemd configuration + template: + src: "etc/systemd/system/jenkins.service.j2" + dest: "/etc/systemd/system/jenkins.service" + tags: + - install + - install:system-requirements + +- name: Configure logrotate for jenkins application log + template: + src: "etc/logrotate.d/jenkins_log.j2" + dest: "/etc/logrotate.d/jenkins" + tags: + - install + - install:system-requirements + +- name: Add env vars + template: + src: "jenkins-env.sh.j2" + dest: "/etc/profile.d/jenkins-env.sh" + owner: root + group: root + mode: "0755" + tags: + - install + - install:base + +- name: Download jenkins-configuration repo + git: + repo: '{{ jenkins_common_configuration_git_url }}' + dest: '{{ jenkins_common_git_home }}/jenkins-configuration' + version: '{{ jenkins_common_jenkins_configuration_branch }}' + become: true + become_user: '{{ jenkins_common_user }}' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Run gradle libs + shell: './gradlew libs' + args: + chdir: '{{ jenkins_common_git_home }}/jenkins-configuration' + environment: + UTILS_PATH: '{{ jenkins_common_home }}/utils' + JENKINS_VERSION: '{{ JENKINS_COMMON_VERSION }}' + become: true + become_user: '{{ jenkins_common_user }}' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Copy init scripts into init.groovy.d + command: 'cp {{ jenkins_common_git_home }}/jenkins-configuration/{{ jenkins_common_configuration_src_path }}/{{ item }} {{ jenkins_common_home }}/init.groovy.d/' + with_items: '{{ jenkins_common_configuration_scripts }}' + become: true + become_user: '{{ jenkins_common_user }}' + register: init_scripts_copied + tags: + - install + - install:base + - install:plugins + +- name: Copy all init scripts other than oauth and security for local dev + command: 'cp {{ jenkins_common_git_home }}/jenkins-configuration/{{ jenkins_common_configuration_src_path }}/{{ item }} {{ jenkins_common_home }}/init.groovy.d/' + with_items: '{{ jenkins_common_configuration_scripts }}' + become: true + become_user: '{{ jenkins_common_user }}' + when: 'item != "4configureGHOAuth.groovy" and item != "4configureSecurity.groovy" and init_scripts_copied is not defined' + tags: + - jenkins:local-dev + +- name: Create jenkins config sub folders + file: + path: '{{ item }}' + state: directory + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_items: + - '{{ jenkins_common_config_path 
}}/credentials' + - '{{ jenkins_common_config_path }}/ec2' + - '{{ jenkins_common_config_path }}/xml' + - '{{ jenkins_common_config_path }}/splunk' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Copy non plugins template files + template: + src: '{{ role_path }}/templates/config/{{ item }}.yml.j2' + dest: '{{ jenkins_common_config_path }}/{{ item }}.yml' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_items: '{{ jenkins_common_non_plugin_template_files }}' + register: templates_copied + tags: + - install + - install:base + - install:plugins + +- name: For local dev, copy any config files other than oauth and security + template: + src: '{{ role_path }}/templates/config/{{ item }}.yml.j2' + dest: '{{ jenkins_common_config_path }}/{{ item }}.yml' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_items: '{{ jenkins_common_non_plugin_template_files }}' + when: 'item != "github_oauth" and item != "security" and templates_copied is not defined' + tags: + - jenkins:local-dev + +- name: Update Github OAUTH settings when promoting jenkins instance to production + template: + src: '{{ role_path }}/templates/config/security.yml.j2' + dest: '{{ jenkins_common_config_path }}/security.yml' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + when: '"security" in jenkins_common_non_plugin_template_files and templates_copied is not defined' + tags: + - jenkins:promote-to-production + +- name: Copy plugins.yml config file + template: + src: '{{ role_path }}/templates/config/plugins.yml.j2' + dest: '{{jenkins_common_config_path }}/plugins.yml' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Copy ec2 config files + template: + src: '{{ item }}' + dest: '{{ jenkins_common_config_path }}/ec2/' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_fileglob: + - '{{ role_path }}/files/ec2/*' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Copy xml config files + template: + src: '{{ item }}' + dest: '{{ jenkins_common_config_path }}/xml/' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_fileglob: + - '{{ jenkins_common_seed_job_source }}' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Copy splunk config script + template: + src: '{{ item }}' + dest: '{{ jenkins_common_config_path }}/splunk/' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + with_fileglob: + - '{{ jenkins_common_splunk_file_path }}' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Run plugins.gradle to download plugins + shell: './gradlew -b plugins.gradle plugins' + args: + chdir: '{{ jenkins_common_git_home }}/jenkins-configuration' + environment: + PLUGIN_OUTPUT_DIR: '{{ jenkins_common_home }}/plugins' + PLUGIN_CONFIG: '{{ jenkins_common_config_path }}/plugins.yml' + become: true + become_user: '{{ jenkins_common_user }}' + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Copy secret file credentials + copy: + content: "{{ item.content }}" + dest: '{{ jenkins_common_config_path }}/credentials/{{ item.name }}' + with_items: '{{ JENKINS_SECRET_FILES_LIST }}' + no_log: yes + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + 
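+# NOTE: each JENKINS_SECRET_FILES_LIST entry is expected to provide at least +# 'name' and 'content' (consumed by the task above); the credentials template +# rendered later also reads 'scope', 'description' and 'id' from the same +# entry. A minimal sketch, with illustrative values only: +# +# JENKINS_SECRET_FILES_LIST: +#   - name: 'example-secret.txt' +#     content: '{{ EXAMPLE_VAULTED_SECRET }}' +#     scope: 'GLOBAL' +#     description: 'Example secret file credential' +#     id: 'example-secret-file' + 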
+- name: Copy ec2 key + copy: + content: '{{ JENKINS_EC2_PRIVATE_KEY }}' + dest: '{{ jenkins_common_config_path }}/ec2/id_rsa' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + no_log: yes + tags: + - install + - install:base + - install:plugins + - jenkins:local-dev + +- name: Start Jenkins Service + systemd: + name: jenkins + daemon_reload: yes + state: restarted + enabled: yes + tags: + - manage + - manage:start + - install:plugins + - jenkins:promote-to-production + +- name: Wait until the Jenkins service has fully initialized + uri: + url: "http://127.0.0.1:{{ jenkins_common_port }}" + status_code: '{{ jenkins_common_ready_status_code }}' + register: result + until: result.status == jenkins_common_ready_status_code + retries: 600 + delay: 1 + tags: + - install:base + - install:plugins + +- name: Delete any existing jenkins-configuration folders to avoid unwanted configuration + file: + path: '{{ item }}' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + state: absent + with_items: + - '{{ jenkins_common_home }}/init.groovy.d' + - '{{ jenkins_common_config_path }}' + tags: + - install:base + - install:plugins diff --git a/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 b/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 new file mode 100644 index 00000000000..729b14433c1 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 @@ -0,0 +1,52 @@ +--- +{% for file in JENKINS_SECRET_FILES_LIST %} +- credentialType: 'secretFile' + scope: '{{ file.scope }}' + name: '{{ file.name }}' + path: 'credentials/{{ file.name }}' + description: '{{ file.description }}' + id: '{{ file.id }}' +{% endfor %} +{% for userPass in JENKINS_USERNAME_PASSWORD_LIST %} +- credentialType: 'usernamePassword' + scope: '{{ userPass.scope }}' + username: '{{ userPass.username }}' + password: '{{ userPass.password }}' + description: '{{ userPass.description }}' + id: '{{ userPass.id }}' +{% endfor %} +{% for text in JENKINS_SECRET_TEXT_LIST %} +- credentialType: 'secretText' + scope: '{{ text.scope }}' + secretText: '{{ text.secretText }}' + description: '{{ text.description }}' + id: '{{ text.id }}' +{% endfor %} +{% for cert in JENKINS_CERTIFICATES_LIST %} +- credentialType: 'certificate' + scope: '{{ cert.scope }}' + path: '{{ cert.path }}' + password: '{{ cert.password }}' + description: '{{ cert.description }}' + id: '{{ cert.id }}' +{% endfor %} +{% for ssh in JENKINS_SSH_LIST %} +- credentialType: 'ssh' + scope: '{{ ssh.scope }}' + username: '{{ ssh.username }}' + sshKey: | + {{ ssh.sshKey | indent(4) }} + passphrase: '{{ ssh.passphrase }}' + description: '{{ ssh.description }}' + id: '{{ ssh.id }}' +{% endfor %} +{% for aws in JENKINS_AWS_LIST %} +- credentialType: 'aws' + scope: '{{ aws.scope }}' + id: '{{ aws.id }}' + accessKeyId: '{{ aws.accessKeyId }}' + secretAccessKey: '{{ aws.secretAccessKey }}' + description: '{{ aws.description }}' + iamRole: '{{ aws.iamRole }}' + mfaSerialNumber: '{{ aws.mfaSerialNumber }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 new file mode 100644 index 00000000000..efbe86cdd1c --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 @@ -0,0 +1,57 @@ +--- +CLOUDS: + - NAME: '{{ JENKINS_EC2_REGION }}' + CREDENTIAL_ID: '{{ JENKINS_EC2_CREDENTIAL_ID }}' + USE_INSTANCE_PROFILE_FOR_CREDS: {{ 
jenkins_common_use_instance_profile_for_creds }} + REGION: '{{ JENKINS_EC2_REGION }}' + EC2_PRIVATE_KEY_PATH: '{{ jenkins_common_config_path }}/ec2/id_rsa' + INSTANCE_CAP: '{{ jenkins_common_instance_cap }}' + ROLE_ARN: '{{ JENKINS_EC2_ROLE_ARN }}' + ROLE_SESSION_NAME: '{{ JENKINS_EC2_ROLE_SESSION_NAME }}' + AMIS: +{% for ami in JENKINS_EC2_AMIS %} + - AMI_ID: '{{ ami.AMI_ID }}' + AVAILABILITY_ZONE: '{{ ami.AVAILABILITY_ZONE }}' + SPOT_CONFIG: + SPOT_MAX_BID_PRICE: '{{ ami.SPOT_CONFIG.SPOT_MAX_BID_PRICE }}' + SPOT_INSTANCE_BID_TYPE: '{{ ami.SPOT_CONFIG.SPOT_INSTANCE_BID_TYPE }}' + SECURITY_GROUPS: '{{ ami.SECURITY_GROUPS }}' + REMOTE_FS_ROOT: '{{ ami.REMOTE_FS_ROOT }}' + INSTANCE_TYPE: '{{ ami.INSTANCE_TYPE }}' + LABEL_STRING: '{{ ami.LABEL_STRING }}' + MODE: '{{ ami.MODE }}' + DESCRIPTION: '{{ ami.DESCRIPTION }}' + INIT_SCRIPT_PATH: '{{ ami.INIT_SCRIPT_PATH }}' + TEMP_DIR: '{{ ami.TEMP_DIR }}' + USER_DATA: '{{ ami.USER_DATA }}' + NUM_EXECUTORS: '{{ ami.NUM_EXECUTORS }}' + REMOTE_ADMIN: '{{ ami.REMOTE_ADMIN }}' + JVM_OPTIONS: '{{ ami.JVM_OPTIONS }}' + STOP_ON_TERMINATE: {{ ami.STOP_ON_TERMINATE }} + SUBNET_ID: '{{ ami.SUBNET_ID }}' + TAGS: +{% for tag in ami.TAGS %} + - NAME: '{{ tag.NAME }}' + VALUE: '{{ tag.VALUE }}' +{% endfor %} + IDLE_TERMINATION_MINUTES: '{{ ami.IDLE_TERMINATION_MINUTES }}' + USE_PRIVATE_DNS_NAME: {{ ami.USE_PRIVATE_DNS_NAME }} + INSTANCE_CAP: '{{ ami.INSTANCE_CAP }}' + IAM_INSTANCE_PROFILE: '{{ ami.IAM_INSTANCE_PROFILE }}' + USE_EPHEMERAL_DEVICES: {{ ami.USE_EPHEMERAL_DEVICES }} + LAUNCH_TIMEOUT: '{{ ami.LAUNCH_TIMEOUT }}' + EBS_OPTIMIZED: {{ ami.EBS_OPTIMIZED }} + DELETE_ROOT_ON_TERMINATION: {{ ami.DELETE_ROOT_ON_TERMINATION }} + AMI_TYPE: + ROOT_COMMAND_PREFIX: '{{ ami.AMI_TYPE.ROOT_COMMAND_PREFIX }}' + SLAVE_COMMAND_PREFIX: '{{ ami.AMI_TYPE.SLAVE_COMMAND_PREFIX }}' + SLAVE_COMMAND_SUFFIX: '{{ ami.AMI_TYPE.SLAVE_COMMAND_SUFFIX }}' + REMOTE_SSH_PORT: '{{ ami.AMI_TYPE.REMOTE_SSH_PORT }}' + USE_DEDICATED_TENANCY: {{ ami.USE_DEDICATED_TENANCY }} + ASSOCIATE_PUBLIC_IP: {{ ami.ASSOCIATE_PUBLIC_IP }} + CUSTOM_DEVICE_MAPPING: '{{ ami.CUSTOM_DEVICE_MAPPING }}' + USE_EXTERNAL_SSH_PROCESS: {{ ami.USE_EXTERNAL_SSH_PROCESS }} + CONNECT_WITH_PUBLIC_IP: {{ ami.CONNECT_WITH_PUBLIC_IP }} + MONITORING: {{ ami.MONITORING }} + T2_UNLIMITED: {{ ami.T2_UNLIMITED }} +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/email_ext_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/email_ext_config.yml.j2 new file mode 100644 index 00000000000..aaad6cf1743 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/email_ext_config.yml.j2 @@ -0,0 +1,24 @@ +--- +ADV_PROPERTIES: '{{ jenkins_common_email_advanced_properties }}' +DEFAULT_CONTENT_TYPE: '{{ jenkins_common_email_content_type }}' +DEFAULT_SUBJECT: '{{ jenkins_common_default_email_subject }}' +DEFAULT_BODY_PATH: '{{ jenkins_common_email_default_body_path }}' +EMERGENCY_REROUTE: '{{ jenkins_common_email_emergency_reroute }}' +DEFAULT_REPLYTO: '{{ jenkins_common_email_replyto }}' +DEFAULT_PRESEND_SCRIPT_PATH: '{{ jenkins_common_email_default_presend_script_path }}' +DEFAULT_POSTSEND_SCRIPT_PATH: '{{ jenkins_common_email_default_postsend_script_path }}' +DEBUG_MODE: '{{ jenkins_common_email_debug_mode }}' +MAX_ATTACHMENT_SIZE: '{{ jenkins_common_email_max_attachment_size }}' +DEFAULT_RECIPIENTS: '{{ jenkins_common_email_default_recipients }}' +ADD_PRECEDENCE_BULK: '{{ jenkins_common_email_add_precedence_bulk }}' +ALLOWED_DOMAINS: '{{ jenkins_common_email_allowed_domains }}' 
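+# ALLOWED_DOMAINS (above) and EXCLUDED_COMMITTERS (below) are typically +# comma-separated lists, e.g. 'example.com,example.org'; leaving them empty +# (the default) disables the corresponding filter. 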
+EXCLUDED_COMMITTERS: '{{ jenkins_common_email_excluded_committers }}' +REQUIRE_ADMIN_FOR_TEMPLATE_TESTING: '{{ jenkins_common_email_require_admin_for_template_testing }}' +WATCHING_ENABLED: '{{ jenkins_common_email_watching_enabled }}' +ALLOW_UNREGISTERED_ENABLED: '{{ jenkins_common_email_allow_unregistered_enabled }}' +USE_LIST_ID: '{{ jenkins_common_email_use_list_id }}' +LIST_ID: '{{ jenkins_common_email_list_id }}' +TRIGGERS: +{% for trigger in jenkins_common_email_triggers %} + - '{{ trigger }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/ghprb_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/ghprb_config.yml.j2 new file mode 100644 index 00000000000..fb8d499c1c9 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/ghprb_config.yml.j2 @@ -0,0 +1,39 @@ +--- +SERVER_API_URL: '{{ jenkins_common_ghprb_server }}' +ADMIN_LIST: +{% for admin in JENKINS_GHPRB_ADMIN_LIST %} + - '{{ admin }}' +{% endfor %} +REQUEST_TESTING_PHRASE: '{{ jenkins_common_ghprb_request_testing }}' +WHITE_LIST_PHRASE: '{{ jenkins_common_ghprb_white_list_phrase }}' +OK_PHRASE: '{{ jenkins_common_ghprb_ok_phrase }}' +RETEST_PHRASE: '{{ jenkins_common_ghprb_retest_phrase }}' +BLACKLIST_AUTHORS: '{{ JENKINS_GHPRB_BLACK_LIST_AUTHORS }}' +SKIP_PHRASE: '{{ jenkins_common_ghprb_skip_phrase }}' +CRON_SCHEDULE: '{{ jenkins_common_ghprb_cron_schedule }}' +USE_COMMENTS: {{ jenkins_common_ghprb_use_comments }} +USE_DETAILED_COMMENTS: {{ jenkins_common_ghprb_use_detailed_comments }} +MANAGE_WEBHOOKS: {{ jenkins_common_ghprb_manage_webhooks }} +UNSTABLE_AS: '{{ jenkins_common_ghprb_failure_as }}' +AUTO_CLOSE_FAILED_PRS: {{ jenkins_common_ghprb_auto_close_fails }} +DISPLAY_ERRORS_DOWNSTREAM: {{ jenkins_commmon_ghprb_display_errors }} +BLACK_LIST_LABELS: +{% for blacklist in JENKINS_GHPRB_BLACK_LIST %} + - '{{ blacklist }}' +{% endfor %} +WHITE_LIST_LABELS: +{% for whitelist in JENKINS_GHPRB_WHITE_LIST %} + - '{{ whitelist }}' +{% endfor %} +GITHUB_AUTH: '{{ jenkins_common_ghprb_github_auth }}' +CANCEL_BUILD_ON_UPDATE: '{{ jenkins_common_ghprb_cancel_build_on_update }}' +SIMPLE_STATUS: '{{ jenkins_common_ghprb_simple_status }}' +PUBLISH_JENKINS_URL: '{{ jenkins_common_ghprb_publish_jenkins_url }}' +BUILD_LOG_LINES_TO_DISPLAY: {{ jenkins_common_ghprb_build_log_lines }} +RESULT_MESSAGES: +{% for message in jenkins_common_ghprb_results %} + - STATUS: '{{ message.STATUS }}' + MESSAGE: '{{ message.MESSAGE }}' +{% endfor %} +CREDENTIALS_ID: '{{ JENKINS_GHPRB_CREDENTIAL_ID }}' +SHARED_SECRET: '{{ JENKINS_GHPRB_SHARED_SECRET }}' diff --git a/playbooks/roles/jenkins_common/templates/config/git_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/git_config.yml.j2 new file mode 100644 index 00000000000..ec1f369cbfd --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/git_config.yml.j2 @@ -0,0 +1,3 @@ +--- +NAME: '{{ JENKINS_GIT_NAME }}' +EMAIL: '{{ JENKINS_GIT_EMAIL }}' diff --git a/playbooks/roles/jenkins_common/templates/config/github_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/github_config.yml.j2 new file mode 100644 index 00000000000..71b15df1cba --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/github_config.yml.j2 @@ -0,0 +1,8 @@ +--- +{% for config in jenkins_common_github_configs %} +- CREDENTIAL_ID: '{{ config.CREDENTIAL_ID }}' + MANAGE_HOOKS: '{{ config.MANAGE_HOOKS }}' + USE_CUSTOM_API_URL: '{{ config.USE_CUSTOM_API_URL }}' + API_URL: '{{ config.GITHUB_API_URL }}' + CACHE_SIZE: {{ config.CACHE_SIZE }} +{% endfor %}
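+{# An inventory override for jenkins_common_github_configs might look like the +following (a sketch with illustrative values; GITHUB_API_URL is presumably only +honored when USE_CUSTOM_API_URL is true, e.g. for a GitHub Enterprise host): +jenkins_common_github_configs: +  - CREDENTIAL_ID: 'github-api-token' +    MANAGE_HOOKS: true +    USE_CUSTOM_API_URL: true +    GITHUB_API_URL: 'https://github.example.com/api/v3' +    CACHE_SIZE: 20 +#} 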
diff --git a/playbooks/roles/jenkins_common/templates/config/github_oauth.yml.j2 b/playbooks/roles/jenkins_common/templates/config/github_oauth.yml.j2 new file mode 100644 index 00000000000..7dec70306b1 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/github_oauth.yml.j2 @@ -0,0 +1,6 @@ +--- +GITHUB_WEB_URI: 'https://github.com/' +GITHUB_API_URI: 'https://api.github.com/' +CLIENT_ID: '{{ JENKINS_SECURITY_CLIENT_ID }}' +CLIENT_SECRET: '{{ JENKINS_SECURITY_CLIENT_SECRET }}' +SCOPES: '{{ jenkins_common_security_scopes }}' diff --git a/playbooks/roles/jenkins_common/templates/config/groovy_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/groovy_config.yml.j2 new file mode 100644 index 00000000000..c82d3385e4d --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/groovy_config.yml.j2 @@ -0,0 +1,7 @@ +--- +GROOVY_INSTALLATIONS: +{% for installation in jenkins_common_groovy_installations %} + - NAME: '{{ installation.NAME }}' + HOME: '{{ installation.HOME }}' + VERSION: '{{ installation.VERSION }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/job_config_history.yml.j2 b/playbooks/roles/jenkins_common/templates/config/job_config_history.yml.j2 new file mode 100644 index 00000000000..a7f68d06aeb --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/job_config_history.yml.j2 @@ -0,0 +1,10 @@ +--- +HISTORY_ROOT_DIR: '{{ jenkins_common_history_root }}' +MAX_HISTORY_ENTRIES: '{{ jenkins_common_history_max_entries }}' +MAX_DAYS_TO_KEEP_ENTRIES: '{{ jenkins_common_history_max_days }}' +MAX_ENTRIES_PER_PAGE: '{{ jenkins_common_history_max_entries_page }}' +SKIP_DUPLICATE_HISTORY: '{{ jenkins_common_history_skip_duplicates }}' +EXCLUDE_PATTERN: '{{ jenkins_common_history_exclude_pattern }}' +SAVE_MODULE_CONFIGURATION: '{{ jenkins_common_history_save_module_config }}' +SHOW_BUILD_BADGES: '{{ jenkins_common_history_show_build_badges }}' +EXCLUDED_USERS: '{{ jenkins_common_history_excluded_users }}' diff --git a/playbooks/roles/jenkins_common/templates/config/log_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/log_config.yml.j2 new file mode 100644 index 00000000000..2554176ef4e --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/log_config.yml.j2 @@ -0,0 +1,9 @@ +--- +{% for recorder in jenkins_common_log_list %} +- LOG_RECORDER: '{{ recorder.LOG_RECORDER }}' + LOGGERS: +{% for log in recorder.LOGGERS %} + - name: '{{ log.name }}' + log_level: '{{ log.log_level }}' +{% endfor %} +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/mailer_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/mailer_config.yml.j2 new file mode 100644 index 00000000000..f52e45b54bf --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/mailer_config.yml.j2 @@ -0,0 +1,9 @@ +--- +SMTP_SERVER: '{{ JENKINS_MAILER_SMTP_SERVER }}' +REPLY_TO_ADDRESS: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' +DEFAULT_SUFFIX: '{{ JENKINS_MAILER_DEFAULT_SUFFIX }}' +SMTP_AUTH_USERNAME: '{{ JENKINS_MAILER_SMTP_AUTH_USERNAME }}' +SMTP_AUTH_PASSWORD: '{{ JENKINS_MAILER_SMTP_AUTH_PASSWORD }}' +SMTP_PORT: '{{ jenkins_common_mailer_port }}' +USE_SSL: '{{ jenkins_common_mailer_use_ssl }}' +CHAR_SET: '{{ jenkins_common_mailer_char_set }}' diff --git a/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 new file mode 100644 index 00000000000..c1cef5de8ee --- /dev/null +++ 
b/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 @@ -0,0 +1,33 @@ +--- +MAIN: + WORKSPACE_ROOT_DIR: '${ITEM_ROOTDIR}/workspace' + BUILD_RECORD_ROOT_DIR: '${ITEM_ROOTDIR}/builds' + SYSTEM_MESSAGE: '{{ jenkins_common_main_system_message }}' + NUMBER_OF_EXECUTORS: {{ jenkins_common_main_num_executors }} + LABELS: +{% for label in jenkins_common_main_labels %} + - '{{ label }}' +{% endfor %} + USAGE: '{{ jenkins_node_usage }}' + QUIET_PERIOD: {{ jenkins_common_main_quiet_period }} + SCM_RETRY_COUNT: {{ jenkins_common_main_scm_retry }} + DISABLE_REMEMBER_ME: {{ jenkins_common_main_disable_remember }} +GLOBAL_PROPERTIES: + ENVIRONMENT_VARIABLES: +{% for env in jenkins_common_main_env_vars %} + - NAME: '{{ env.NAME }}' + VALUE: '{{ env.VALUE }}' +{% endfor %} + TOOL_LOCATIONS: +LOCATION: + URL: '{{ JENKINS_MAIN_URL }}' + ADMIN_EMAIL: '{{ JENKINS_MAIN_ADMIN_EMAIL }}' +SHELL: + EXECUTABLE: '{{ jenkins_common_main_executable }}' +FORMATTER: + FORMATTER_TYPE: '{{ jenkins_common_formatter_type }}' + DISABLE_SYNTAX_HIGHLIGHTING: {{ jenkins_common_disable_syntax_highlighting }} +CLI: + CLI_ENABLED: false +SETUP_WIZARD: + SETUP_WIZARD_ENABLED: false diff --git a/playbooks/roles/jenkins_common/templates/config/mask_passwords_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/mask_passwords_config.yml.j2 new file mode 100644 index 00000000000..f7963067272 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/mask_passwords_config.yml.j2 @@ -0,0 +1,10 @@ +--- +MASKED_PARAMETER_CLASSES: +{% for class in JENKINS_MASK_PASSWORDS_CLASSES %} + - '{{ class }}' +{% endfor %} +NAME_PASSWORD_PAIRS: +{% for pair in JENKINS_MASK_PASSWORDS_PAIRS %} + - NAME: '{{ pair.NAME }}' + PASSWORD: '{{ pair.PASSWORD }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/plugins.yml.j2 b/playbooks/roles/jenkins_common/templates/config/plugins.yml.j2 new file mode 100644 index 00000000000..4a9b110a1b9 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/plugins.yml.j2 @@ -0,0 +1,7 @@ +--- +{% for plugin in jenkins_common_plugins_list %} +- name: '{{ plugin.name }}' + version: '{{ plugin.version }}' + group: '{{ plugin.group }}' +{% endfor %} + diff --git a/playbooks/roles/jenkins_common/templates/config/properties_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/properties_config.yml.j2 new file mode 100644 index 00000000000..29bf5865453 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/properties_config.yml.j2 @@ -0,0 +1,5 @@ +--- +{% for key_value in jenkins_common_system_properties %} +- KEY: '{{ key_value.KEY }}' + VALUE: "{{ key_value.VALUE }}" +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/python_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/python_config.yml.j2 new file mode 100644 index 00000000000..1d3ff71694d --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/python_config.yml.j2 @@ -0,0 +1,10 @@ +--- +PYTHON_INSTALLATIONS: +{% for installation in jenkins_common_python_installations %} + - PYTHON_ALIAS: '{{ installation.PYTHON_ALIAS }}' + PYTHON_PATH: '{{ installation.PYTHON_PATH }}' + PYTHON_PROPERTIES: [] +{% for property in installation.PYTHON_PROPERTIES %} + - property +{% endfor %} +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/saml_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/saml_config.yml.j2 new file mode 100644 index 00000000000..8f9d75daaba --- /dev/null +++ 
b/playbooks/roles/jenkins_common/templates/config/saml_config.yml.j2 @@ -0,0 +1,34 @@ +--- +IDP_METADATA: '{{ JENKINS_SAML_IDP_METADATA }}' +DISPLAY_NAME_ATTRIBUTE: '{{ JENKINS_SAML_DISPLAY_NAME_ATTRIBUTE }}' +GROUP_ATTRIBUTE: '{{ JENKINS_SAML_GROUP_ATTRIBUTE }}' +MAX_AUTH_LIFETIME_SECONDS: '{{ JENKINS_SAML_MAX_AUTH_LIFETIME_SECONDS }}' +USERNAME_ATTRIBUTE: '{{ JENKINS_SAML_USERNAME_ATTRIBUTE }}' +EMAIL_ATTRIBUTE: '{{ JENKINS_SAML_EMAIL_ATTRIBUTE }}' +LOGOUT_URL: '{{ JENKINS_SAML_LOGOUT_URL }}' +{% if JENKINS_SAML_ADVANCED_CONFIGURATION %} +ADVANCED_CONFIGURATION: + FORCE_AUTH: {{ JENKINS_SAML_ADVANCED_CONFIGURATION.FORCE_AUTH }} + CONTEXT_CLASS_REF: '{{ JENKINS_SAML_ADVANCED_CONFIGURATION.CONTEXT_CLASS_REF }}' + ENTITY_ID: '{{ JENKINS_SAML_ADVANCED_CONFIGURATION.ENTITY_ID }}' + MAXIMUM_SESSION_LIFETIME: {{ JENKINS_SAML_ADVANCED_CONFIGURATION.MAXIMUM_SESSION_LIFETIME }} +{% else %} +ADVANCED_CONFIGURATION: {} +{% endif %} +{% if JENKINS_SAML_ENCRYPTION_DATA %} +ENCRYPTION_DATA: + KEY_STORE_PATH: '{{ JENKINS_SAML_ENCRYPTION_DATA.KEY_STORE_PATH }}' + KEY_STORE_PASSWORD: '{{ JENKINS_SAML_ENCRYPTION_DATA.KEY_STORE_PASSWORD }}' + PRIVATE_KEY_PASSWORD: '{{ JENKINS_SAML_ENCRYPTION_DATA.PRIVATE_KEY_PASSWORD }}' + PRIVATE_KEY_ALIAS: '{{ JENKINS_SAML_ENCRYPTION_DATA.PRIVATE_KEY_ALIAS }}' + FORCE_SIGN_REDIRECT_BINDING_AUTH_REQUEST: {{ JENKINS_SAML_ENCRYPTION_DATA.FORCE_SIGN_REDIRECT_BINDING_AUTH_REQUEST }} +{% else %} +ENCRYPTION_DATA: {} +{% endif %} +USERNAME_CASE_CONVENTION: '{{ JENKINS_SAML_USERNAME_CASE_CONVENTION }}' +BINDING: '{{ JENKINS_SAML_BINDING }}' +SAML_CUSTOM_ATTRIBUTES: +{% for attribute in JENKINS_SAML_CUSTOM_ATTRIBUTES %} + - ATTRIBUTE_NAME: '{{ attribute.ATTRIBUTE_NAME }}' + ATTRIBUTE_VALUE: '{{ attribute.ATTRIBUTE_VALUE }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/security.yml.j2 b/playbooks/roles/jenkins_common/templates/config/security.yml.j2 new file mode 100644 index 00000000000..855c0c76abd --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/security.yml.j2 @@ -0,0 +1,22 @@ +--- +AGENT_SETTINGS: + PROTOCOLS: +{% for protocol in jenkins_common_security_agent_protocols %} + - {{ protocol }} +{% endfor %} + JNLP_TCP_PORT: {{ jenkins_common_security_agent_jnlp_tcp_port }} +SECURITY_GROUPS: +{% for group in JENKINS_SECURITY_GROUPS %} + - NAME: '{{ group.NAME }}' + PERMISSIONS: +{% for permission in group.PERMISSIONS %} + - {{ permission }} +{% endfor %} + USERS: +{% for user in group.USERS %} + - {{ user }} +{% endfor %} +{% endfor %} +DSL_SCRIPT_SECURITY_ENABLED: {{ jenkins_common_dsl_script_security_enabled }} +CSRF_PROTECTION_ENABLED: {{ JENKINS_CSRF_PROTECTION_ENABLED }} +CSRF_PROXY_COMPATIBILITY: {{ JENKINS_CSRF_PROXY_COMPATIBILITY }} diff --git a/playbooks/roles/jenkins_common/templates/config/seed_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/seed_config.yml.j2 new file mode 100644 index 00000000000..961fabb9ce7 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/seed_config.yml.j2 @@ -0,0 +1,3 @@ +--- +NAME: '{{ jenkins_common_seed_name }}' +XML_PATH: '{{ jenkins_common_seed_path }}' diff --git a/playbooks/roles/jenkins_common/templates/config/slack_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/slack_config.yml.j2 new file mode 100644 index 00000000000..8ae6c9097d1 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/slack_config.yml.j2 @@ -0,0 +1,8 @@ +--- +SLACK_BASE_URL: '{{ JENKINS_SLACK_BASE_URL }}' +IS_SLACK_BOT: '{{ JENKINS_IS_SLACK_BOT }}' 
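+# SLACK_ROOM below is the default channel to notify (e.g. '#builds'), and +# SLACK_TEAM_DOMAIN is the workspace subdomain, i.e. the 'myteam' in +# myteam.slack.com. 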
+SLACK_ROOM: '{{ JENKINS_SLACK_ROOM }}' +SLACK_TEAM_DOMAIN: '{{ JENKINS_SLACK_TEAM_DOMAIN }}' +# The following must be an id of a credential created in +# src/main/groovy/3importCredentials.groovy +SLACK_CREDENTIAL_ID: '{{ JENKINS_SLACK_CREDENTIAL_ID }}' diff --git a/playbooks/roles/jenkins_common/templates/config/splunk_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/splunk_config.yml.j2 new file mode 100644 index 00000000000..e27c35792c5 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/splunk_config.yml.j2 @@ -0,0 +1,21 @@ +--- +SPLUNK_APP_ENABLED: {{ jenkins_common_splunk_enabled }} +SPLUNK_HOSTNAME: '{{ JENKINS_SPLUNK_HOSTNAME }}' +SPLUNK_HOST_PORT: {{ JENKINS_SPLUNK_PORT }} +USE_SSL: {{ jenkins_common_splunk_use_ssl }} +SPLUNK_TOKEN: '{{ JENKINS_SPLUNK_TOKEN }}' +RAW_EVENTS_ENABLED: {{ jenkins_common_splunk_raw_events_enabled }} +MAX_EVENT_BATCH_SIZE: {{ jenkins_common_splunk_batch_size }} +SPLUNK_APP_URL: '{{ JENKINS_SPLUNK_APP_URL }}' +RETRIES_ON_ERROR: {{ jenkins_common_splunk_retries }} +IGNORED_JOBS_PATTERN: {{ jenkins_common_splunk_ignored_jobs }} +MASTER_HOSTNAME: '{{ jenkins_common_splunk_master_hostname }}' +EVENT_SOURCE: '{{ jenkins_common_splunk_event_source }}' +SCRIPT_TYPE: '{{ jenkins_common_splunk_script_type }}' +SCRIPT_PATH: '{{ jenkins_common_splunk_script_path }}' +METADATA: +{% for metadata in jenkins_common_splunk_metadata %} + - DATA_SOURCE: '{{ metadata.data_source }}' + CONFIG_ITEM: '{{ metadata.config_item }}' + VALUE: '{{ metadata.value }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/timestamper_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/timestamper_config.yml.j2 new file mode 100644 index 00000000000..c1aff3b42d9 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/timestamper_config.yml.j2 @@ -0,0 +1,4 @@ +--- +SYSTEM_CLOCK_FORMAT: "{{ jenkins_common_timestamper_system_clock_format }}" +ELAPSED_TIME_FORMAT: "{{ jenkins_common_timestamper_elapsed_time_format }}" +ENABLED_ON_PIPELINES: {{ jenkins_common_timestamper_enabled_on_pipelines }} diff --git a/playbooks/roles/jenkins_common/templates/config/user_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/user_config.yml.j2 new file mode 100644 index 00000000000..3ff73bb0283 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/user_config.yml.j2 @@ -0,0 +1,6 @@ +--- +{% for user in JENKINS_USER_LIST %} +- USERNAME: '{{ user.USERNAME }}' + PASSWORD: '{{ user.PASSWORD }}' + EMAIL_ADDRESS: '{{ user.EMAIL_ADDRESS }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/etc/logrotate.d/jenkins_log.j2 b/playbooks/roles/jenkins_common/templates/etc/logrotate.d/jenkins_log.j2 new file mode 100644 index 00000000000..d0206562b8b --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/etc/logrotate.d/jenkins_log.j2 @@ -0,0 +1,11 @@ +# Put in place by ansible + +/var/log/jenkins/*jenkins.log { + weekly + copytruncate + missingok + rotate 52 + compress + delaycompress + notifempty +} diff --git a/playbooks/roles/jenkins_common/templates/etc/systemd/system/jenkins.service.j2 b/playbooks/roles/jenkins_common/templates/etc/systemd/system/jenkins.service.j2 new file mode 100644 index 00000000000..b2d743b94ea --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/etc/systemd/system/jenkins.service.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=Jenkins + +[Service] +Type=forking +Environment="JENKINS_HOME={{ jenkins_common_home }}" +Environment="JENKINS_CONFIG_PATH={{ 
jenkins_common_config_path }}" +PassEnvironment=JENKINS_HOME JENKINS_CONFIG_PATH +User=jenkins +Group=jenkins +ExecStart=/usr/bin/java \ + {{ jenkins_common_jvm_args }} \ + -jar /usr/share/jenkins/jenkins.war \ + --daemon \ + --logfile=/var/log/jenkins/jenkins.log \ + --webroot=/var/cache/jenkins \ + --httpPort={{ jenkins_common_port }} \ + --ajp13Port=-1 + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/jenkins_common/templates/jenkins-env.sh.j2 b/playbooks/roles/jenkins_common/templates/jenkins-env.sh.j2 new file mode 100644 index 00000000000..b6c6b671199 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/jenkins-env.sh.j2 @@ -0,0 +1,4 @@ +export JENKINS_HOME='{{ jenkins_common_home }}' +export JENKINS_CONFIG_PATH='{{ jenkins_common_config_path }}' +export JENKINS_VERSION='{{ JENKINS_COMMON_VERSION }}' +export JENKINS_WAR_SOURCE='{{ jenkins_common_war_source}}' diff --git a/playbooks/roles/jenkins_data_engineering/defaults/main.yml b/playbooks/roles/jenkins_data_engineering/defaults/main.yml new file mode 100644 index 00000000000..80510583a7f --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/defaults/main.yml @@ -0,0 +1,261 @@ +--- +# See README.rst for variable descriptions + +jenkins_home: '/var/lib/jenkins' + +JENKINS_SERVER_NAME: 'jenkins.analytics.edx.org' +jenkins_user: 'jenkins' +jenkins_group: 'jenkins' +jenkins_user_home: '/home/{{ jenkins_user }}' +jenkins_port: 8080 +jenkins_nginx_port: 80 +jenkins_protocol_https: true +# Always start Jenkins in Quiet/Maintenance mode +start_jenkins_in_quiet_mode: true +AUTOMATION_USER: 'edx-analytics-automation' +jenkins_host_name: "{{ JENKINS_SERVER_NAME | default('jenkins') }}" + +JENKINS_VOLUMES: [] + +de_jenkins_seed_name: 'data_engineering_seed_job' +de_jenkins_seed_path: '{{ jenkins_common_config_path }}/xml/seed_job.xml' +de_jenkins_seed_job_source: '{{ role_path }}/../jenkins_data_engineering/files/xml/seed_job.xml' + +de_jenkins_user_uid: 900 +de_jenkins_group_gid: 900 +DE_JENKINS_VERSION: jenkins_2.89.4 +de_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16g -Djenkins.install.runSetupWizard=false -Dmail.smtp.starttls.enable=true -Dmail.smtp.ssl.protocols=TLSv1.2' + +jenkins_base_environment_variables: + - NAME: 'AUTOMATION_USER' + VALUE: '{{ AUTOMATION_USER }}' + - NAME: 'PAGER_NOTIFY' + VALUE: '{{ PAGER_NOTIFY }}' + +JENKINS_DATA_ENGINEERING_EXTRA_PKGS: + - gettext + - pkg-config + - libsqlite3-dev + - libgeos-dev + - libmysqlclient-dev + - libgraphviz-dev + - libxmlsec1-dev + - zip + +# See https://www.vaultproject.io/downloads for latest version. 
+DE_JENKINS_VAULT_CLI_VERSION: 1.9.4 + +# plugins +de_jenkins_plugins_list: + - name: 'ansicolor' + version: '0.5.2' + group: 'org.jenkins-ci.plugins' + - name: 'ant' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'antisamy-markup-formatter' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'buildgraph-view' + version: '1.1.1' + group: 'org.jenkins-ci.plugins' + - name: 'build-name-setter' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'build-timeout' + version: '1.19' + group: 'org.jenkins-ci.plugins' + - name: 'build-user-vars-plugin' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'cobertura' + version: '1.12.1' + group: 'org.jenkins-ci.plugins' + - name: 'copyartifact' + version: '1.39' + group: 'org.jenkins-ci.plugins' + - name: 'credentials' + version: '2.1.18' + group: 'org.jenkins-ci.plugins' + - name: 'credentials-binding' + version: '1.15' + group: 'org.jenkins-ci.plugins' + - name: 'cvs' + version: '2.14' + group: 'org.jenkins-ci.plugins' + - name: 'ec2' + version: '1.28' + group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.62' + group: 'org.jenkins-ci.plugins' + - name: 'envinject' + version: '1.92.1' + group: 'org.jenkins-ci.plugins' + - name: 'exclusive-execution' + version: '0.8' + group: 'org.jenkins-ci.plugins' + - name: 'external-monitor-job' + version: '1.4' + group: 'org.jenkins-ci.plugins' + - name: 'flexible-publish' + version: '0.15.2' + group: 'org.jenkins-ci.plugins' + - name: 'git' + version: '3.4.0' + group: 'org.jenkins-ci.plugins' + - name: 'git-client' + version: '2.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'github' + version: '1.29.1' + group: 'com.coravy.hudson.plugins.github' + - name: 'github-api' + version: '1.90' + group: 'org.jenkins-ci.plugins' + - name: 'github-oauth' + version: '0.29' + group: 'org.jenkins-ci.plugins' + - name: 'gradle' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'groovy' + version: '1.29' + group: 'org.jenkins-ci.plugins' + - name: 'groovy-postbuild' + version: '2.2' + group: 'org.jvnet.hudson.plugins' + - name: 'htmlpublisher' + version: '1.10' + group: 'org.jenkins-ci.plugins' + - name: 'javadoc' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'jobConfigHistory' + version: '2.18' + group: 'org.jenkins-ci.plugins' + - name: 'job-dsl' + version: '1.67' + group: 'org.jenkins-ci.plugins' + - name: 'junit' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'ldap' + version: '1.11' + group: 'org.jenkins-ci.plugins' + - name: 'mailer' + version: '1.18' + group: 'org.jenkins-ci.plugins' + - name: 'mask-passwords' + version: '2.8' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-auth' + version: '1.2' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-project' + version: '1.13' + group: 'org.jenkins-ci.plugins' + - name: 'maven-plugin' + version: '3.1.2' + group: 'org.jenkins-ci.main' + - name: 'monitoring' + version: '1.56.0' + group: 'org.jvnet.hudson.plugins' + - name: 'multiple-scms' + version: '0.6' + group: 'org.jenkins-ci.plugins' + - name: 'nodelabelparameter' + version: '1.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'pam-auth' + version: '1.2' + group: 'org.jenkins-ci.plugins' + - name: 'parameterized-trigger' + version: '2.34' + group: 'org.jenkins-ci.plugins' + - name: 'PrioritySorter' + version: '2.9' + group: 'org.jenkins-ci.plugins' + - name: 'rebuild' + version: '1.25' + group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'reverse-proxy-auth-plugin' + version: '1.5' + group: 
'org.jenkins-ci.plugins' + - name: 'run-condition' + version: '1.0' + group: 'org.jenkins-ci.plugins' + - name: 'shiningpanda' + version: '0.23' + group: 'org.jenkins-ci.plugins' + - name: 'script-security' + version: '1.44' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-agent' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-credentials' + version: '1.14' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-slaves' + version: '1.9' + group: 'org.jenkins-ci.plugins' + - name: 'subversion' + version: '2.5.7' + group: 'org.jenkins-ci.plugins' + - name: 'text-finder' + version: '1.10' + group: 'org.jenkins-ci.plugins' + - name: 'throttle-concurrents' + version: '2.0.1' + group: 'org.jenkins-ci.plugins' + - name: 'timestamper' + version: '1.5.15' + group: 'org.jenkins-ci.plugins' + - name: 'token-macro' + version: '2.3' + group: 'org.jenkins-ci.plugins' + - name: 'translation' + version: '1.12' + group: 'org.jenkins-ci.plugins' + - name: 'violations' + version: '0.7.11' + group: 'org.jenkins-ci.plugins' + - name: 'workflow-job' + version: '2.11' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-step-api' + version: '2.12' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'xunit' + version: '1.93' + group: 'org.jenkins-ci.plugins' + +# ghprb +de_jenkins_ghprb_white_list_phrase: '.*[Aa]dd\W+to\W+whitelist.*' +de_jenkins_ghprb_ok_phrase: '.*ok\W+to\W+test.*' +de_jenkins_ghprb_retest_phrase: '.*jenkins\W+run\W+all.*' +de_jenkins_ghprb_skip_phrase: '.*\[[Ss]kip\W+ci\].*' +de_jenkins_ghprb_cron_schedule: 'H/5 * * * *' + +# github +JENKINS_GITHUB_CONFIG: '' + +# ec2 +de_jenkins_instance_cap: '500' + +JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT: 30 + +jenkins_connection_retries: 240 +jenkins_connection_delay: 1 + +jenkins_private_keyfile: "{{ jenkins_user_home }}/.ssh/id_rsa" +jenkins_public_keyfile: "{{ jenkins_private_keyfile }}.pub" + +# Be clear about which time zone the console log timestamps are in!!! +# use ZZ for Jenkins < 2.222.x +# use XX for Jenkins >= 2.222.x +de_jenkins_timestamper_system_time: "''HH:mm:ssZZ' '" + +# Populate the cloudwatch_procstat_patterns with patterns that you want to pass to the procstat config. +cloudwatch_procstat_patterns: ['nginx', 'jenkins.war', 'cloudwatch-agent'] diff --git a/playbooks/roles/jenkins_data_engineering/files/xml/seed_job.xml b/playbooks/roles/jenkins_data_engineering/files/xml/seed_job.xml new file mode 100644 index 00000000000..916925cd64d --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/files/xml/seed_job.xml @@ -0,0 +1,133 @@ + + + + Run createJobs script to seed all dsl jobs. + false + + + + -1 + 20 + -1 + -1 + + + + false + -1 + + + false + false + + + + + DSL_SCRIPT + Path to dsl script to run, from the root of the https://github.com/edx/jenkins-job-dsl repo (i.e. sample/jobs/sampleJob.groovy) + {{ JENKINS_JOB_DSL_SCRIPT_NAME }} + + + JOB_DSL_BRANCH + Branch of jenkins-job-dsl repo to use + {{ JENKINS_JOB_DSL_REPO_BRANCH }} + + + SECURE_BRANCH + Branch of the secure repo to use + {{ JENKINS_JOB_DSL_SECURE_BRANCH }} + + + COMMON_VARS_DIR + Base path for job specific configurations. + analytics-secure-config/job-configs/ + + + + + + + + 2 + + + https://github.com/edx/jenkins-job-dsl.git + + + + + ${JOB_DSL_BRANCH} + + + false + + + + + 2 + + + {{ JENKINS_JOB_DSL_SECURE_REPO_SSH }} + 1 + + + + + $SECURE_BRANCH + + + false + + + + analytics-secure-config + + + + + + + + false + false + false + false + + false + + + #!/usr/bin/env bash + # exit if user-supplied parameter does not exist + if [ ! 
-e ${DSL_SCRIPT} ]; then + echo "DSL Script '${DSL_SCRIPT}' does not exist. Please try again" + exit 1 + fi + + + + tert + + libs +assemble + + + (Default) + true + true + true + true + + + ${DSL_SCRIPT} + false + false + IGNORE + IGNORE + JENKINS_ROOT + lib/snakeyaml-1.17.jar +src/main/groovy + + + + + diff --git a/playbooks/roles/jenkins_data_engineering/meta/main.yml b/playbooks/roles/jenkins_data_engineering/meta/main.yml new file mode 100644 index 00000000000..72456c96c7c --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/meta/main.yml @@ -0,0 +1,87 @@ +--- +dependencies: + - common + - role: jenkins_common + jenkins_node_usage: 'NORMAL' + JENKINS_COMMON_VERSION: '{{ DE_JENKINS_VERSION }}' + jenkins_common_user_uid: '{{ de_jenkins_user_uid }}' + jenkins_common_group_gid: '{{ de_jenkins_group_gid }}' + jenkins_common_jvm_args: '{{ de_jenkins_jvm_args }}' + jenkins_common_main_labels: + - master + jenkins_common_configuration_scripts: + - 1addJarsToClasspath.groovy + - 2checkInstalledPlugins.groovy + - 3importCredentials.groovy + - 3mainConfiguration.groovy + - 3setGlobalProperties.groovy + - 3shutdownCLI.groovy + - 4configureGHOAuth.groovy + - 4configureGithub.groovy + - 4configureJobConfigHistory.groovy + - 4configureMailerPlugin.groovy + - 4configureMaskPasswords.groovy + - 4configureSecurity.groovy + - 5createLoggers.groovy + - 5addSeedJob.groovy + - 5configureEmailExtension.groovy + - 9StartInQuietMode.groovy + jenkins_common_plugins_list: '{{ de_jenkins_plugins_list }}' + jenkins_common_ghprb_white_list_phrase: '{{ de_jenkins_ghprb_white_list_phrase }}' + jenkins_common_ghprb_ok_phrase: '{{ de_jenkins_ghprb_ok_phrase }}' + jenkins_common_ghprb_retest_phrase: '{{ de_jenkins_ghprb_retest_phrase }}' + jenkins_common_ghprb_skip_phrase: '{{ de_jenkins_ghprb_skip_phrase }}' + jenkins_common_ghprb_cron_schedule: '{{ de_jenkins_ghprb_cron_schedule }}' + jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' + jenkins_common_instance_cap: '{{ de_jenkins_instance_cap }}' + jenkins_common_seed_name: '{{ de_jenkins_seed_name }}' + jenkins_common_seed_path: '{{ de_jenkins_seed_path }}' + jenkins_common_protocol_https: false + jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' + AUTOMATION_PRIVATE_KEY_SOURCE_PATH: null + jenkins_common_main_num_executors: '{{ JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT }}' + jenkins_common_jenkins_configuration_branch: '{{ JENKINS_CONFIGURATION_REPO_BRANCH }}' + jenkins_common_seed_job_source: '{{ de_jenkins_seed_job_source }}' + jenkins_common_dsl_script_security_enabled: false + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + JENKINS_SECURITY_GROUPS: + - NAME: 'Administrator' + PERMISSIONS: + - com.cloudbees.plugins.credentials.CredentialsProvider.Create + - com.cloudbees.plugins.credentials.CredentialsProvider.Delete + - com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains + - com.cloudbees.plugins.credentials.CredentialsProvider.Update + - com.cloudbees.plugins.credentials.CredentialsProvider.View + - hudson.model.Computer.Build + - hudson.model.Computer.Configure + - hudson.model.Computer.Connect + - hudson.model.Computer.Create + - hudson.model.Computer.Delete + - hudson.model.Computer.Disconnect + - hudson.model.Hudson.Administer + - hudson.model.Hudson.ConfigureUpdateCenter + - hudson.model.Hudson.Read + - hudson.model.Hudson.RunScripts + - hudson.model.Hudson.UploadPlugins + - hudson.model.Item.Build + - hudson.model.Item.Cancel + - hudson.model.Item.Configure + - hudson.model.Item.Create + - 
hudson.model.Item.Delete + - hudson.model.Item.Discover + - hudson.model.Item.Move + - hudson.model.Item.Read + - hudson.model.Item.Workspace + - hudson.model.Run.Delete + - hudson.model.Run.Replay + - hudson.model.Run.Update + - hudson.model.View.Configure + - hudson.model.View.Create + - hudson.model.View.Delete + - hudson.model.View.Read + - hudson.scm.SCM.Tag + USERS: '{{ JENKINS_DATA_ENGINEERING_AUTH_ADMINISTRATORS }}' + jenkins_common_main_env_vars: '{{ jenkins_base_environment_variables }} + {{ jenkins_additional_environment_variables }}' + jenkins_common_timestamper_system_clock_format: '{{ de_jenkins_timestamper_system_time }}' + + - role: mongo_client diff --git a/playbooks/roles/jenkins_data_engineering/tasks/main.yml b/playbooks/roles/jenkins_data_engineering/tasks/main.yml new file mode 100644 index 00000000000..83008c21b62 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/tasks/main.yml @@ -0,0 +1,149 @@ +--- +# Tasks for role jenkins_data_engineering +# +# Overview: +# +# This role sets up a Jenkins Instance for analytics tasks. + +- name: Setting the hostname + hostname: + name: "{{ jenkins_host_name }}" + +- name: install jenkins analytics extra system packages + apt: + pkg={{ item }} state=present update_cache=yes + with_items: "{{ JENKINS_DATA_ENGINEERING_EXTRA_PKGS }}" + tags: + - jenkins + +# Download and install the Hashicorp Vault CLI: +- name: download vault CLI zip archive + get_url: + url: "https://releases.hashicorp.com/vault/%7B%7B%20DE_JENKINS_VAULT_CLI_VERSION%20%7D%7D/vault_%7B%7B%20DE_JENKINS_VAULT_CLI_VERSION%20%7D%7D_linux_amd64.zip" + dest: "/tmp/vault_{{ DE_JENKINS_VAULT_CLI_VERSION }}_linux_amd64.zip" + checksum: sha256:9be49dc07a1b73cc78dd5e5cca88588758bb1994fd954ae2c983eb5986887db5 + tags: + - jenkins-vault +- name: install vault CLI globally + unarchive: + src: "/tmp/vault_{{ DE_JENKINS_VAULT_CLI_VERSION }}_linux_amd64.zip" + dest: /usr/local/bin + remote_src: yes + tags: + - jenkins-vault + +- name: Create /edx/var/edxapp dir + file: + path: "/edx/var/edxapp" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + tags: + - jenkins-edxapp + +- name: create directory + file: + path: "/home/{{ jenkins_user }}/.ssh" + state: directory + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + tags: + - jenkins-auth + +- copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PRIVATE_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_private_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PUBLIC_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_public_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- name: jenkins user config dir + file: + name: "{{ jenkins_home }}/users/{{ jenkins_user }}" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-auth + +- name: template jenkins user config.xml + template: + src: jenkins.user.config.xml + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + force: no # don't overwrite if already there + tags: + - jenkins-auth + +- name: fetch jenkins user public key + shell: "cat {{ jenkins_public_keyfile }}" + register: jenkins_public_key + tags: + - jenkins-auth + +- name: add jenkins user public key + lineinfile: + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + 
state: present + regexp: "^\\s*" + line: "{{ jenkins_public_key.stdout }}" + tags: + - jenkins-auth + +- name: Wait for Jenkins to start up before proceeding. + shell: "curl -D - --silent --max-time 5 {{ JENKINS_MAIN_URL }}cli/" + register: result + until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1) + retries: 60 + delay: 10 + changed_when: false + check_mode: no + tags: + - jenkins-auth + +- name: wipe initialization scripts from jenkins_commons + file: + path: '{{ jenkins_home }}/init.groovy.d/{{ item }}' + state: absent + # Only delete files that don't match 9StartInQuietMode.groovy when start_jenkins_in_quiet_mode is on. + when: item != "9StartInQuietMode.groovy" and start_jenkins_in_quiet_mode + with_items: "{{ jenkins_common_configuration_scripts }}" + + tags: + - jenkins-auth + +- name: wipe initialization configuration files from jenkins_commons + file: + path: '{{ jenkins_home }}/init-configs/' + state: absent + tags: + - jenkins-auth + +- name: restart Jenkins + service: name=jenkins state=restarted + tags: + - jenkins-auth + +# Add the jenkins user's ssh public key to the running user's authorized keys +# This is needed so that this jenkins instance can be used to update system users +- name: Add the jenkins user's ssh public key to the running user's authorized keys + lineinfile: + path: /home/{{ ansible_ssh_user }}/.ssh/authorized_keys + create: yes + line: "{{ lookup('file', JENKINS_DATA_ENGINEERING_AUTOMATION_PUBLIC_KEY_SOURCE_PATH) }}" + tags: + - ssh + - ssh:keys diff --git a/playbooks/roles/jenkins_data_engineering/templates/jenkins.user.config.xml b/playbooks/roles/jenkins_data_engineering/templates/jenkins.user.config.xml new file mode 100644 index 00000000000..0ec74ca3e0a --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/templates/jenkins.user.config.xml @@ -0,0 +1,53 @@ + + + {{ jenkins_user }} + + + + + + + + + + + + + + + + + + + + + edx + shadow + jenkins + authenticated + + 1457073573763 + + + + + + + All + false + false + + + + + + + + + + + + false + + + diff --git a/playbooks/roles/jenkins_data_engineering_new/defaults/main.yml b/playbooks/roles/jenkins_data_engineering_new/defaults/main.yml new file mode 100644 index 00000000000..974164e8cbf --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/defaults/main.yml @@ -0,0 +1,310 @@ +--- +# See README.rst for variable descriptions + +jenkins_home: '/var/lib/jenkins' + +JENKINS_SERVER_NAME: 'jenkins-new.analytics.edx.org' +jenkins_user: 'jenkins' +jenkins_group: 'jenkins' +# the groups that the jenkins user must belong to +jenkins_groups: "jenkins,docker" +jenkins_user_home: '/home/{{ jenkins_user }}' +jenkins_port: 8080 +jenkins_nginx_port: 80 +jenkins_protocol_https: true +# Always start Jenkins in Quiet/Maintenance mode +start_jenkins_in_quiet_mode: true +AUTOMATION_USER: 'edx-analytics-automation' +jenkins_host_name: "{{ JENKINS_SERVER_NAME | default('jenkins') }}" +# We should expect a 403 Forbidden from Jenkins during the init stage, +# as it should not be publicly available. 
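+# This value is passed through to jenkins_common_ready_status_code in this role's meta/main.yml.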
+de_jenkins_ready_status_code: 403 + +JENKINS_VOLUMES: [] + +de_jenkins_seed_name: 'data_engineering_seed_job' +de_jenkins_seed_path: '{{ jenkins_common_config_path }}/xml/seed_job.xml' +de_jenkins_seed_job_source: '{{ role_path }}/../jenkins_data_engineering_new/files/xml/seed_job.xml' + +de_jenkins_user_uid: 900 +de_jenkins_group_gid: 900 +DE_JENKINS_VERSION: jenkins_2.222.3 +de_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16g -Djenkins.install.runSetupWizard=false -Dmail.smtp.starttls.enable=true -Dmail.smtp.ssl.protocols=TLSv1.2' + +de_jenkins_snap_pkgs: + - hub + +# Python versions to install onto the system +de_jenkins_python_versions: + - python3.5 + - python3.7 + - python3.8 + - python3.9 + +# Jenkins aliases to installed Python binaries +de_jenkins_python_installations: + - PYTHON_ALIAS: 'System-CPython-2.7' + PYTHON_PATH: '/usr/bin/python2.7' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'System-CPython-3.5.2' + PYTHON_PATH: '/usr/bin/python3.5' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.7' + PYTHON_PATH: '/usr/bin/python3.7' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.8' + PYTHON_PATH: '/usr/bin/python3.8' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.9' + PYTHON_PATH: '/usr/bin/python3.9' + PYTHON_PROPERTIES: [] + +jenkins_base_environment_variables: + - NAME: 'AUTOMATION_USER' + VALUE: '{{ AUTOMATION_USER }}' + - NAME: 'PAGER_NOTIFY' + VALUE: '{{ PAGER_NOTIFY }}' + +JENKINS_DATA_ENGINEERING_EXTRA_PKGS: + - gettext + - pkg-config + - libsqlite3-dev + - libgeos-dev + - libmysqlclient-dev + - libgraphviz-dev + - libxmlsec1-dev + - zip + - jq + +# See https://www.vaultproject.io/downloads for latest version. +DE_JENKINS_VAULT_CLI_VERSION: 1.9.4 + +# plugins +de_jenkins_plugins_list: + - name: 'audit-trail' + version: '3.4' + group: 'org.jenkins-ci.plugins' + - name: 'ansicolor' + version: '0.5.2' + group: 'org.jenkins-ci.plugins' + - name: 'ant' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'antisamy-markup-formatter' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'aws-credentials' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'build-name-setter' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'build-timeout' + version: '1.19' + group: 'org.jenkins-ci.plugins' + - name: 'build-user-vars-plugin' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'copyartifact' + version: '1.39' + group: 'org.jenkins-ci.plugins' + - name: 'credentials' + version: '2.3.0' + group: 'org.jenkins-ci.plugins' + - name: 'credentials-binding' + version: '1.15' + group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.66' + group: 'org.jenkins-ci.plugins' + - name: 'envinject' + version: '2.3.0' + group: 'org.jenkins-ci.plugins' + - name: 'exclusive-execution' + version: '0.8' + group: 'org.jenkins-ci.plugins' + - name: 'external-monitor-job' + version: '1.4' + group: 'org.jenkins-ci.plugins' + - name: 'flexible-publish' + version: '0.15.2' + group: 'org.jenkins-ci.plugins' + - name: 'ghprb' + version: '1.42.1' + group: 'org.jenkins-ci.plugins' + - name: 'git' + version: '4.2.2' + group: 'org.jenkins-ci.plugins' + - name: 'git-client' + version: '3.0.0' + group: 'org.jenkins-ci.plugins' + - name: 'github' + version: '1.29.2' + group: 'com.coravy.hudson.plugins.github' + - name: 'github-api' + version: '1.111' + group: 'org.jenkins-ci.plugins' + - name: 'github-oauth' + version: '0.33' + group: 'org.jenkins-ci.plugins' + - name: 'gradle' + version: '1.29' + group: 'org.jenkins-ci.plugins' + - name: 'groovy' 
+ version: '2.2' + group: 'org.jenkins-ci.plugins' + - name: 'groovy-postbuild' + version: '2.4' + group: 'org.jvnet.hudson.plugins' + - name: 'htmlpublisher' + version: '1.21' + group: 'org.jenkins-ci.plugins' + - name: 'javadoc' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'jobConfigHistory' + version: '2.24' + group: 'org.jenkins-ci.plugins' + - name: 'job-dsl' + version: '1.77' + group: 'org.jenkins-ci.plugins' + - name: 'junit' + version: '1.28' + group: 'org.jenkins-ci.plugins' + - name: 'ldap' + version: '1.20' + group: 'org.jenkins-ci.plugins' + - name: 'mailer' + version: '1.23' + group: 'org.jenkins-ci.plugins' + - name: 'mask-passwords' + version: '2.13' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-auth' + version: '2.3' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-project' + version: '1.14' + group: 'org.jenkins-ci.plugins' + - name: 'maven-plugin' + version: '3.4' + group: 'org.jenkins-ci.main' + - name: 'monitoring' + version: '1.76.0' + group: 'org.jvnet.hudson.plugins' + - name: 'multiple-scms' + version: '0.6' + group: 'org.jenkins-ci.plugins' + - name: 'nodelabelparameter' + version: '1.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'pam-auth' + version: '1.5.1' + group: 'org.jenkins-ci.plugins' + - name: 'parameterized-trigger' + version: '2.35.2' + group: 'org.jenkins-ci.plugins' + - name: 'postbuild-task' + version: '1.8' + group: 'org.jvnet.hudson.plugins' + - name: 'PrioritySorter' + version: '2.9' + group: 'org.jenkins-ci.plugins' + - name: 'rebuild' + version: '1.31' + group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'reverse-proxy-auth-plugin' + version: '1.6.3' + group: 'org.jenkins-ci.plugins' + - name: 'run-condition' + version: '1.0' + group: 'org.jenkins-ci.plugins' + - name: 'shiningpanda' + version: '0.23' + group: 'org.jenkins-ci.plugins' + - name: 'slack' + version: '2.21' + group: 'org.jenkins-ci.plugins' + - name: 'script-security' + version: '1.71' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-agent' + version: '1.17' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-credentials' + version: '1.17.3' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-slaves' + version: '1.28.1' + group: 'org.jenkins-ci.plugins' + - name: 'subversion' + version: '2.13.1' + group: 'org.jenkins-ci.plugins' + - name: 'text-finder' + version: '1.10' + group: 'org.jenkins-ci.plugins' + - name: 'throttle-concurrents' + version: '2.0.1' + group: 'org.jenkins-ci.plugins' + - name: 'timestamper' + version: '1.11.2' + group: 'org.jenkins-ci.plugins' + - name: 'token-macro' + version: '2.10' + group: 'org.jenkins-ci.plugins' + - name: 'translation' + version: '1.16' + group: 'org.jenkins-ci.plugins' + - name: 'violations' + version: '0.7.11' + group: 'org.jenkins-ci.plugins' + - name: 'workflow-cps' + version: '2.80' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-cps-global-lib' + version: '2.15' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-durable-task-step' + version: '2.35' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-job' + version: '2.35' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-step-api' + version: '2.21' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'ws-cleanup' + version: '0.39' + group: 'org.jenkins-ci.plugins' + - name: 'xunit' + version: '1.93' + group: 'org.jenkins-ci.plugins' + +# ghprb +de_jenkins_ghprb_white_list_phrase: '.*[Aa]dd\W+to\W+whitelist.*' +de_jenkins_ghprb_ok_phrase: '.*ok\W+to\W+test.*' +de_jenkins_ghprb_retest_phrase: 
'.*jenkins\W+run\W+all.*' +de_jenkins_ghprb_skip_phrase: '.*\[[Ss]kip\W+ci\].*' +de_jenkins_ghprb_cron_schedule: 'H/5 * * * *' + +# github +JENKINS_GITHUB_CONFIG: '' + +# ec2 +de_jenkins_instance_cap: '500' + +JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT: 30 + +jenkins_connection_retries: 240 +jenkins_connection_delay: 1 + +jenkins_private_keyfile: "{{ jenkins_user_home }}/.ssh/id_rsa" +jenkins_public_keyfile: "{{ jenkins_private_keyfile }}.pub" + +# Be clear about which time zone the console log timestamps are in!!! +# use ZZ for Jenkins < 2.222.x +# use XX for Jenkins >= 2.222.x +de_jenkins_timestamper_system_time: "''HH:mm:ssXX' '" + +# Populate the cloudwatch_procstat_patterns with patterns that you want to pass to the procstat config. +cloudwatch_procstat_patterns: ['nginx', 'jenkins.war', 'cloudwatch-agent'] diff --git a/playbooks/roles/jenkins_data_engineering_new/files/xml/seed_job.xml b/playbooks/roles/jenkins_data_engineering_new/files/xml/seed_job.xml new file mode 100644 index 00000000000..cb450aa05f8 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/files/xml/seed_job.xml @@ -0,0 +1,132 @@ + + + + Run createJobs script to seed all dsl jobs. + false + + + + -1 + 20 + -1 + -1 + + + + false + -1 + + + false + false + + + + + DSL_SCRIPT + Path to dsl script to run, from the root of the https://github.com/edx/jenkins-job-dsl repo (i.e. sample/jobs/sampleJob.groovy) + {{ JENKINS_JOB_DSL_SCRIPT_NAME }} + + + JOB_DSL_BRANCH + Branch of jenkins-job-dsl repo to use + {{ JENKINS_JOB_DSL_REPO_BRANCH }} + + + SECURE_BRANCH + Branch of the secure repo to use + {{ JENKINS_JOB_DSL_SECURE_BRANCH }} + + + COMMON_VARS_DIR + Base path for job specific configurations. + analytics-secure-config/job-configs/ + + + + + + + + 2 + + + https://github.com/edx/jenkins-job-dsl.git + + + + + ${JOB_DSL_BRANCH} + + + false + + + + + 2 + + + {{ JENKINS_JOB_DSL_SECURE_REPO_SSH }} + 1 + + + + + $SECURE_BRANCH + + + false + + + + analytics-secure-config + + + + + + + + false + false + false + false + + false + + + #!/usr/bin/env bash + # exit if user-supplied parameter does not exist + if [ ! -e ${DSL_SCRIPT} ]; then + echo "DSL Script '${DSL_SCRIPT}' does not exist. 
Please try again" + exit 1 + fi + + + + + libs +assemble + + + (Default) + true + true + true + true + + + ${DSL_SCRIPT} + false + false + IGNORE + IGNORE + JENKINS_ROOT + lib/snakeyaml-1.17.jar +src/main/groovy + + + + + diff --git a/playbooks/roles/jenkins_data_engineering_new/meta/main.yml b/playbooks/roles/jenkins_data_engineering_new/meta/main.yml new file mode 100644 index 00000000000..1903d20353f --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/meta/main.yml @@ -0,0 +1,54 @@ +--- +dependencies: + - common + - role: jenkins_common + jenkins_node_usage: 'NORMAL' + JENKINS_COMMON_VERSION: '{{ DE_JENKINS_VERSION }}' + jenkins_common_user_uid: '{{ de_jenkins_user_uid }}' + jenkins_common_group_gid: '{{ de_jenkins_group_gid }}' + jenkins_common_jvm_args: '{{ de_jenkins_jvm_args }}' + jenkins_common_main_labels: + - master + jenkins_common_configuration_scripts: + - 1addJarsToClasspath.groovy + - 2checkInstalledPlugins.groovy + - 3importCredentials.groovy + - 3installPython.groovy + - 3mainConfiguration.groovy + - 3setGlobalProperties.groovy + - 4configureGHOAuth.groovy + - 4configureGHPRB.groovy + - 4configureGithub.groovy + - 4configureJobConfigHistory.groovy + - 4configureMailerPlugin.groovy + - 4configureMaskPasswords.groovy + - 4configureSlack.groovy + - 4configureSecurity.groovy + - 5createLoggers.groovy + - 5addSeedJob.groovy + - 5configureEmailExtension.groovy + - 9StartInQuietMode.groovy + jenkins_common_plugins_list: '{{ de_jenkins_plugins_list }}' + jenkins_common_ghprb_white_list_phrase: '{{ de_jenkins_ghprb_white_list_phrase }}' + jenkins_common_ghprb_ok_phrase: '{{ de_jenkins_ghprb_ok_phrase }}' + jenkins_common_ghprb_retest_phrase: '{{ de_jenkins_ghprb_retest_phrase }}' + jenkins_common_ghprb_skip_phrase: '{{ de_jenkins_ghprb_skip_phrase }}' + jenkins_common_ghprb_cron_schedule: '{{ de_jenkins_ghprb_cron_schedule }}' + jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' + jenkins_common_instance_cap: '{{ de_jenkins_instance_cap }}' + jenkins_common_seed_name: '{{ de_jenkins_seed_name }}' + jenkins_common_seed_path: '{{ de_jenkins_seed_path }}' + jenkins_common_protocol_https: false + jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' + AUTOMATION_PRIVATE_KEY_SOURCE_PATH: null + jenkins_common_main_num_executors: '{{ JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT }}' + jenkins_common_jenkins_configuration_branch: '{{ JENKINS_CONFIGURATION_REPO_BRANCH }}' + jenkins_common_seed_job_source: '{{ de_jenkins_seed_job_source }}' + jenkins_common_dsl_script_security_enabled: false + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + jenkins_common_main_env_vars: '{{ jenkins_base_environment_variables }} + {{ jenkins_additional_environment_variables }}' + jenkins_common_ready_status_code: '{{ de_jenkins_ready_status_code }}' + jenkins_common_python_versions: '{{ de_jenkins_python_versions }}' + jenkins_common_python_installations: '{{ de_jenkins_python_installations }}' + jenkins_common_snap_pkgs: '{{ de_jenkins_snap_pkgs }}' + jenkins_common_timestamper_system_clock_format: '{{ de_jenkins_timestamper_system_time }}' diff --git a/playbooks/roles/jenkins_data_engineering_new/tasks/main.yml b/playbooks/roles/jenkins_data_engineering_new/tasks/main.yml new file mode 100644 index 00000000000..2312fb66374 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/tasks/main.yml @@ -0,0 +1,151 @@ +--- +# Tasks for role jenkins_data_engineering +# +# Overview: +# +# This role sets up a Jenkins Instance for analytics tasks. 
+ +- name: Setting the hostname + hostname: + name: "{{ jenkins_host_name }}" + +- name: install jenkins analytics extra system packages + apt: + pkg={{ item }} state=present update_cache=yes + with_items: "{{ JENKINS_DATA_ENGINEERING_EXTRA_PKGS }}" + tags: + - jenkins + +# Download and install the Hashicorp Vault CLI: +- name: download vault CLI zip archive + get_url: + url: "https://releases.hashicorp.com/vault/%7B%7B%20DE_JENKINS_VAULT_CLI_VERSION%20%7D%7D/vault_%7B%7B%20DE_JENKINS_VAULT_CLI_VERSION%20%7D%7D_linux_amd64.zip" + dest: "/tmp/vault_{{ DE_JENKINS_VAULT_CLI_VERSION }}_linux_amd64.zip" + checksum: sha256:9be49dc07a1b73cc78dd5e5cca88588758bb1994fd954ae2c983eb5986887db5 + tags: + - jenkins-vault +- name: install vault CLI globally + unarchive: + src: "/tmp/vault_{{ DE_JENKINS_VAULT_CLI_VERSION }}_linux_amd64.zip" + dest: /usr/local/bin + remote_src: yes + tags: + - jenkins-vault + +- name: Create /edx/var/edxapp dir + file: + path: "/edx/var/edxapp" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + tags: + - jenkins-edxapp + +- name: create ssh directory for jenkins user + file: + path: "/home/{{ jenkins_user }}/.ssh" + state: directory + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + tags: + - jenkins-auth + +- name: add jenkins private key + copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PRIVATE_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_private_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- name: add jenkins public key + copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PUBLIC_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_public_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- name: create jenkins user config dir + file: + name: "{{ jenkins_home }}/users/{{ jenkins_user }}" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-auth + +- name: template jenkins user config.xml + template: + src: jenkins.user.config.xml + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + force: no # don't overwrite if already there + tags: + - jenkins-auth + +- name: fetch jenkins user public key + shell: "cat {{ jenkins_public_keyfile }}" + register: jenkins_public_key + tags: + - jenkins-auth + +- name: add jenkins user public key + lineinfile: + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + state: present + regexp: "^\\s*" + line: "{{ jenkins_public_key.stdout }}" + tags: + - jenkins-auth + +- name: Wait for Jenkins to start up before proceeding. + shell: "curl -D - --silent --max-time 5 {{ JENKINS_MAIN_URL }}cli/" + register: result + until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1) + retries: 60 + delay: 10 + changed_when: false + check_mode: no + tags: + - jenkins-auth + +- name: wipe initialization scripts from jenkins_commons + file: + path: '{{ jenkins_home }}/init.groovy.d/{{ item }}' + state: absent + # Only delete files that don't match 9StartInQuietMode.groovy when start_jenkins_in_quiet_mode is on. 
+ when: item != "9StartInQuietMode.groovy" and start_jenkins_in_quiet_mode + with_items: "{{ jenkins_common_configuration_scripts }}" + + tags: + - jenkins-auth + +- name: wipe initialization configuration files from jenkins_commons + file: + path: '{{ jenkins_home }}/init-configs/' + state: absent + tags: + - jenkins-auth + +- name: restart Jenkins + service: name=jenkins state=restarted + tags: + - jenkins-auth + +# Add the jenkins user's ssh public key to the running user's authorized keys +# This is needed so that this jenkins instance can be used to update system users +- name: Add the jenkins user's ssh public key to the running user's authorized keys + lineinfile: + path: /home/{{ ansible_ssh_user }}/.ssh/authorized_keys + create: yes + line: "{{ lookup('file', JENKINS_DATA_ENGINEERING_AUTOMATION_PUBLIC_KEY_SOURCE_PATH) }}" + tags: + - ssh + - ssh:keys diff --git a/playbooks/roles/jenkins_data_engineering_new/tasks/system.yml b/playbooks/roles/jenkins_data_engineering_new/tasks/system.yml new file mode 100644 index 00000000000..63b65bcf18c --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/tasks/system.yml @@ -0,0 +1,11 @@ +--- +- name: Create jenkins group + group: name={{ jenkins_group }} state=present + +# The Jenkins account needs a login shell because Jenkins uses scp +- name: Add the jenkins user to the group and configure shell + user: + name: '{{ jenkins_user }}' + groups: '{{ jenkins_groups }}' + append: yes + shell: /bin/bash \ No newline at end of file diff --git a/playbooks/roles/jenkins_data_engineering_new/templates/jenkins.user.config.xml b/playbooks/roles/jenkins_data_engineering_new/templates/jenkins.user.config.xml new file mode 100644 index 00000000000..1776428c6ab --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/templates/jenkins.user.config.xml @@ -0,0 +1,53 @@ + + + {{ jenkins_user }} + + + + + + + + + + + + + + + + + + + + + edx + shadow + jenkins + authenticated + + 1457073573763 + + + + + + + All + false + false + + + + + + + + + + + + false + + + diff --git a/playbooks/roles/jenkins_it/defaults/main.yml b/playbooks/roles/jenkins_it/defaults/main.yml new file mode 100644 index 00000000000..591c1f89fa9 --- /dev/null +++ b/playbooks/roles/jenkins_it/defaults/main.yml @@ -0,0 +1,309 @@ +it_jenkins_user_uid: 1002 +it_jenkins_group_gid: 1004 +it_jenkins_groups: 'jenkins,docker' +IT_JENKINS_VERSION: jenkins_2.150.2 +it_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16384m -DsessionTimeout=60' +it_jenkins_main_num_executors: 5 + +oracle_path: '/opt/oracle' +IT_ORACLE_S3_PATH: '' +IT_ORACLE_INSTANT_CLIENT: '' +it_oracle_packages: + - libaio1 + +it_jenkins_python_versions: + - python3.5-dev + +it_jenkins_configuration_scripts: + - 1addJarsToClasspath.groovy + - 2checkInstalledPlugins.groovy + - 3addUsers.groovy + - 3importCredentials.groovy + - 3installGroovy.groovy + - 3installPython.groovy + - 3mainConfiguration.groovy + - 3setGlobalProperties.groovy + - 3shutdownCLI.groovy + - 4configureGHPRB.groovy + - 4configureGit.groovy + - 4configureGithub.groovy + - 4configureMailerPlugin.groovy + - 4configureMaskPasswords.groovy + - 4configureSAML.groovy + - 4configureSecurity.groovy + - 4configureSlack.groovy + - 5createLoggers.groovy + +jenkins_it_non_plugin_template_files: + - user_config + - credentials + - email_ext_config + - ghprb_config + - git_config + - github_config + - groovy_config + - job_config_history + - log_config + - mailer_config + - main_config + - mask_passwords_config + - properties_config + - python_config + - saml_config 
+ - security + - seed_config + - slack_config + +it_jenkins_plugins_list: + - name: 'analysis-core' + version: '1.95' + group: 'org.jvnet.hudson.plugins' + - name: 'ansicolor' + version: '0.5.2' + group: 'org.jenkins-ci.plugins' + - name: 'ant' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'antisamy-markup-formatter' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'aws-credentials' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'aws-java-sdk' + version: '1.11.457' + group: 'org.jenkins-ci.plugins' + - name: 'badge' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'bouncycastle-api' + version: '2.17' + group: 'org.jenkins-ci.plugins' + - name: 'build-name-setter' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'build-timeout' + version: '1.19' + group: 'org.jenkins-ci.plugins' + - name: 'build-user-vars-plugin' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'cobertura' + version: '1.12.1' + group: 'org.jenkins-ci.plugins' + - name: 'copyartifact' + version: '1.39' + group: 'org.jenkins-ci.plugins' + - name: 'credentials' + version: '2.1.18' + group: 'org.jenkins-ci.plugins' + - name: 'credentials-binding' + version: '1.15' + group: 'org.jenkins-ci.plugins' + - name: 'cvs' + version: '2.14' + group: 'org.jenkins-ci.plugins' + - name: 'docker-commons' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.62' + group: 'org.jenkins-ci.plugins' + - name: 'envinject' + version: '2.1.5' + group: 'org.jenkins-ci.plugins' + - name: 'exclusive-execution' + version: '0.8' + group: 'org.jenkins-ci.plugins' + - name: 'external-monitor-job' + version: '1.4' + group: 'org.jenkins-ci.plugins' + - name: 'ghprb' + version: '1.42.0' + group: 'org.jenkins-ci.plugins' + - name: 'git' + version: '3.9.3' + group: 'org.jenkins-ci.plugins' + - name: 'github' + version: '1.29.2' + group: 'com.coravy.hudson.plugins.github' + - name: 'github-api' + version: '1.90' + group: 'org.jenkins-ci.plugins' + - name: 'github-branch-source' + version: '2.3.6' + group: 'org.jenkins-ci.plugins' + - name: 'gradle' + version: '1.29' + group: 'org.jenkins-ci.plugins' + - name: 'groovy' + version: '2.1' + group: 'org.jenkins-ci.plugins' + - name: 'groovy-postbuild' + version: '2.4' + group: 'org.jvnet.hudson.plugins' + - name: 'htmlpublisher' + version: '1.16' + group: 'org.jenkins-ci.plugins' + - name: 'javadoc' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'job-dsl' + version: '1.70' + group: 'org.jenkins-ci.plugins' + - name: 'junit' + version: '1.26' + group: 'org.jenkins-ci.plugins' + - name: 'mailer' + version: '1.21' + group: 'org.jenkins-ci.plugins' + - name: 'mask-passwords' + version: '2.10.1' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-auth' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-project' + version: '1.13' + group: 'org.jenkins-ci.plugins' + - name: 'maven-plugin' + version: '3.1.2' + group: 'org.jenkins-ci.main' + - name: 'monitoring' + version: '1.76.0' + group: 'org.jvnet.hudson.plugins' + - name: 'multiple-scms' + version: '0.6' + group: 'org.jenkins-ci.plugins' + - name: 'nodelabelparameter' + version: '1.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'pam-auth' + version: '1.4' + group: 'org.jenkins-ci.plugins' + - name: 'parameterized-trigger' + version: '2.35.2' + group: 'org.jenkins-ci.plugins' + - name: 'pipeline-model-definition' + version: '1.2.9' + group: 'org.jenkinsci.plugins' + - name: 'pipeline-build-step' + version: '2.5.1' + group: 
'org.jenkins-ci.plugins' + - name: 'pipeline-utility-steps' + version: '2.0.2' + group: 'org.jenkins-ci.plugins' + - name: 'PrioritySorter' + version: '2.9' + group: 'org.jenkins-ci.plugins' + - name: 'rebuild' + version: '1.29' + group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'resource-disposer' + version: '0.12' + group: 'org.jenkins-ci.plugins' + - name: 'run-condition' + version: '1.0' + group: 'org.jenkins-ci.plugins' + - name: 'saml' + version: '1.1.0' + group: 'org.jenkins-ci.plugins' + - name: 'script-security' + version: '1.53' + group: 'org.jenkins-ci.plugins' + - name: 'shiningpanda' + version: '0.23' + group: 'org.jenkins-ci.plugins' + - name: 'slack' + version: '2.2' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-agent' + version: '1.17' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-credentials' + version: '1.14' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-slaves' + version: '1.28.1' + group: 'org.jenkins-ci.plugins' + - name: 'structs' + version: '1.17' + group: 'org.jenkins-ci.plugins' + - name: 'timestamper' + version: '1.8.9' + group: 'org.jenkins-ci.plugins' + - name: 'token-macro' + version: '2.6' + group: 'org.jenkins-ci.plugins' + - name: 'translation' + version: '1.16' + group: 'org.jenkins-ci.plugins' + - name: 'violations' + version: '0.7.11' + group: 'org.jenkins-ci.plugins' + - name: 'warnings' + version: '5.0.1' + group: 'org.jvnet.hudson.plugins' + - name: 'warnings-ng' + version: '2.2.1' + group: 'io.jenkins.plugins' + - name: 'workflow-aggregator' + version: '2.5' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-cps' + version: '2.46' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-durable-task-step' + version: '2.18' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-support' + version: '2.18' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'ws-cleanup' + version: '0.34' + group: 'org.jenkins-ci.plugins' + - name: 'xunit' + version: '1.93' + group: 'org.jenkins-ci.plugins' + +# ghprb +it_jenkins_ghprb_white_list_phrase: '.*[Aa]dd\W+to\W+whitelist.*' +it_jenkins_ghprb_ok_phrase: '.*ok\W+to\W+test.*' +it_jenkins_ghprb_retest_phrase: '.*jenkins\W+run\W+all.*' +it_jenkins_ghprb_skip_phrase: '.*\[[Ss]kip\W+ci\].*' +it_jenkins_ghprb_cron_schedule: 'H/5 * * * *' + +# github +JENKINS_GITHUB_CONFIG: '' + +# seed +it_jenkins_seed_name: 'manually_seed_one_job' + +# logs +it_jenkins_log_list: + - LOG_RECORDER: 'Ghprb' + LOGGERS: + - name: 'org.jenkinsci.plugins.ghprb.GhprbPullRequest' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbRootAction' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbRepository' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbGitHub' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.Ghprb' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbTrigger' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbBuilds' + log_level: 'ALL' + - LOG_RECORDER: 'GithubPushLogs' + LOGGERS: + - name: 'com.cloudbees.jenkins.GitHubPushTrigger' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.github.webhook.WebhookManager' + log_level: 'ALL' + - name: 'com.cloudbees.jenkins.GitHubWebHook' + log_level: 'ALL' + - name: 'hudson.plugins.git.GitSCM' + log_level: 'ALL' diff --git a/playbooks/roles/jenkins_it/meta/main.yml b/playbooks/roles/jenkins_it/meta/main.yml new file mode 100644 index 00000000000..0d92de8fcbb --- /dev/null +++ b/playbooks/roles/jenkins_it/meta/main.yml @@ -0,0 +1,25 @@ +--- +dependencies: + - common + - role: 
jenkins_common + JENKINS_COMMON_VERSION: '{{ IT_JENKINS_VERSION }}' + jenkins_common_user_uid: '{{ it_jenkins_user_uid }}' + jenkins_common_group_gid: '{{ it_jenkins_group_gid }}' + jenkins_common_groups: '{{ it_jenkins_groups }}' + jenkins_common_jvm_args: '{{ it_jenkins_jvm_args }}' + jenkins_common_configuration_scripts: '{{ it_jenkins_configuration_scripts }}' + jenkins_common_template_files: '{{ it_jenkins_template_files }}' + jenkins_common_plugins_list: '{{ it_jenkins_plugins_list }}' + jenkins_common_ghprb_white_list_phrase: '{{ it_jenkins_ghprb_white_list_phrase }}' + jenkins_common_ghprb_ok_phrase: '{{ it_jenkins_ghprb_ok_phrase }}' + jenkins_common_ghprb_retest_phrase: '{{ it_jenkins_ghprb_retest_phrase }}' + jenkins_common_ghprb_skip_phrase: '{{ it_jenkins_ghprb_skip_phrase }}' + jenkins_common_ghprb_cron_schedule: '{{ it_jenkins_ghprb_cron_schedule }}' + jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' + jenkins_common_seed_name: '{{ it_jenkins_seed_name }}' + jenkins_common_log_list: '{{ it_jenkins_log_list }}' + jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + jenkins_common_python_versions: '{{ it_jenkins_python_versions }}' + jenkins_common_non_plugin_template_files: '{{ jenkins_it_non_plugin_template_files }}' + jenkins_common_main_num_executors: '{{ it_jenkins_main_num_executors }}' diff --git a/playbooks/roles/jenkins_it/tasks/main.yml b/playbooks/roles/jenkins_it/tasks/main.yml new file mode 100644 index 00000000000..a2fbb3429a5 --- /dev/null +++ b/playbooks/roles/jenkins_it/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: make oracle directory for instant client + file: + path: "{{ oracle_path }}" + state: directory + owner: "root" + group: "root" + mode: 0755 + tags: + - install + - install:system-requirements +- name: download instant client + shell: "aws s3 cp {{ IT_ORACLE_S3_PATH }}/{{ IT_ORACLE_INSTANT_CLIENT }} {{ oracle_path }}" + tags: + - install + - install:system-requirements +- name: unzip instant client + unarchive: + src: "{{ oracle_path }}/{{ IT_ORACLE_INSTANT_CLIENT }}" + dest: "{{ oracle_path }}" + creates: "{{ oracle_path }}/instantclient_12_2" + tags: + - install + - install:system-requirements +- name: clean up zip file + file: + path: "{{ oracle_path }}/{{ IT_ORACLE_INSTANT_CLIENT }}" + state: absent +- name: Install oracle specific packages + apt: + name: '{{ item }}' + state: present + update_cache: yes + with_items: '{{ it_oracle_packages }}' + tags: + - install + - install:system-requirements diff --git a/playbooks/roles/jenkins_master/defaults/main.yml b/playbooks/roles/jenkins_master/defaults/main.yml index 973dd0cf9cb..516c7077e17 100644 --- a/playbooks/roles/jenkins_master/defaults/main.yml +++ b/playbooks/roles/jenkins_master/defaults/main.yml @@ -3,71 +3,110 @@ jenkins_user: "jenkins" jenkins_group: "edx" jenkins_server_name: "jenkins.testeng.edx.org" jenkins_port: 8080 +jenkins_nginx_port: 80 +jenkins_protocol_https: true +jenkins_job_venv_dir: "/edx/var/jenkins/jobvenvs/" +jenkins_venv_src_dir: "../util/jenkins" -jenkins_version: 1.538 -jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_%7B%7B%20jenkins_version%20%7D%7D_all.deb" -jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb" +# Mysql5.7 pkg for focal (There is no mysql5.7 pkg in focal, so we added mysql bionic repo) +jenkins_install_mysql_5_7: false +jenkins_mysql_apt_keyserver: "keyserver.ubuntu.com" +jenkins_mysql_apt_key: "467B942D3A79BD29" +jenkins_mysql_repo: "deb 
http://repo.mysql.com/apt/ubuntu/ bionic mysql-5.7" + +JENKINS_VERSION: '1.651.3' +jenkins_deb_url: "https://archives.jenkins-ci.org/debian-stable/jenkins_%7B%7B%20JENKINS_VERSION%20%7D%7D_all.deb" +jenkins_deb: "jenkins_{{ JENKINS_VERSION }}_all.deb" +# Jenkins jvm args are set when starting the Jenkins service, e.g., "-Xmx1024m" +jenkins_jvm_args: "" jenkins_plugins: + - { name: "ansicolor", version: "0.4.1" } + - { name: "ant", version: "1.2" } + - { name: "build-flow-plugin", version: "0.17" } + - { name: "build-flow-test-aggregator", version: "1.0" } + - { name: "build-flow-toolbox-plugin", version: "0.1" } - { name: "build-name-setter", version: "1.3" } - { name: "build-pipeline-plugin", version: "1.4" } - - { name: "build-timeout", version: "1.11" } - - { name: "cobertura", version: "1.9.2" } - - { name: "copyartifact", version: "1.28" } + - { name: "build-timeout", version: "1.14.1" } + - { name: "build-user-vars-plugin", version: "1.5" } + - { name: "buildgraph-view", version: "1.1.1" } + - { name: "cloudbees-folder", version: "5.2.1" } + - { name: "cobertura", version: "1.9.6" } + - { name: "copyartifact", version: "1.32.1" } - { name: "copy-to-slave", version: "1.4.3" } - - { name: "credentials", version: "1.8.3" } + - { name: "credentials", version: "1.24" } - { name: "dashboard-view", version: "2.9.1" } - - { name: "ec2", version: "1.19" } - - { name: "github", version: "1.8" } - - { name: "github-api", version: "1.44" } - - { name: "github-oauth", version: "0.14" } - - { name: "htmlpublisher", version: "1.2" } - - { name: "javadoc", version: "1.1" } - - { name: "jobConfigHistory", version: "2.4" } - - { name: "jquery", version: "1.7.2-1" } - - { name: "mailer", version: "1.5" } + - { name: "ec2", version: "1.28" } + - { name: "envinject", version: "1.92.1" } + - { name: "external-monitor-job", version: "1.4" } + - { name: "ghprb", version: "1.22.4" } + - { name: "git", version: "2.4.0"} + - { name: "git-client", version: "1.19.0"} + - { name: "github", version: "1.14.0" } + - { name: "github-api", version: "1.69" } + - { name: "github-oauth", version: "0.22.3" } + - { name: "github-sqs-plugin", version: "1.5" } + - { name: "gradle", version: "1.24" } + - { name: "grails", version: "1.7" } + - { name: "groovy-postbuild", version: "2.2" } + - { name: "htmlpublisher", version: "1.3" } + - { name: "javadoc", version: "1.3" } + - { name: "jobConfigHistory", version: "2.10" } + - { name: "job-dsl", version: "1.43" } + - { name: "junit", version: "1.3" } + - { name: "ldap", version: "1.11" } + - { name: "mailer", version: "1.16" } + - { name: "mapdb-api", version: "1.0.6.0" } + - { name: "mask-passwords", version: "2.8" } + - { name: "matrix-auth", version: "1.2" } + - { name: "matrix-project", version: "1.4" } + - { name: "monitoring", version: "1.56.0" } + - { name: "multiple-scms", version: "0.5" } - { name: "nested-view", version: "1.10" } - { name: "next-build-number", version: "1.0" } + - { name: "node-iterator-api", version: "1.5" } - { name: "notification", version: "1.5" } - - { name: "pam-auth", version: "1.0" } - - { name: "parameterized-trigger", version: "2.20" } + - { name: "pam-auth", version: "1.2" } + - { name: "parameterized-trigger", version: "2.25" } - { name: "postbuild-task", version: "1.8" } + - { name: "plain-credentials", version: "1.1" } + - { name: "PrioritySorter", version: "2.9" } + - { name: "rebuild", version: "1.25" } - { name: "sauce-ondemand", version: "1.61" } - - { name: "s3", version: "0.5" } - - { name: "ssh-agent", version: "1.3" } - 
- { name: "ssh-credentials", version: "1.5.1" } - - { name: "ssh-slaves", version: "1.4" } - - { name: "shiningpanda", version: "0.20" } + - { name: "scm-api", version: "0.2" } + - { name: "script-security", version: "1.12" } + - { name: "s3", version: "0.6" } + - { name: "ssh-agent", version: "1.5" } + - { name: "ssh-credentials", version: "1.11" } + - { name: "ssh-slaves", version: "1.9" } + - { name: "shiningpanda", version: "0.23" } + - { name: "throttle-concurrents", version: "1.9.0" } - { name: "tmpcleaner", version: "1.1" } - - { name: "token-macro", version: "1.8.1" } - - { name: "translation", version: "1.10" } + - { name: "token-macro", version: "1.10" } + - { name: "timestamper", version: "1.5.15" } + - { name: "thinBackup", version: "1.7.4" } + - { name: "translation", version: "1.12" } - { name: "violations", version: "0.7.11" } - - { name: "multiple-scms", version: "0.2" } - - { name: "timestamper", version: "1.5.7" } + - { name: "windows-slaves", version: "1.0" } + - { name: "xunit", version: "1.93"} jenkins_bundled_plugins: - "credentials" - "git" + - "pam-auth" - "ssh-credentials" - "ssh-slaves" -jenkins_custom_plugins: - - { repo_name: "git-client-plugin", - repo_url: "https://github.com/edx/git-client-plugin.git", - package: "git-client.hpi", - version: "2f7fc4648fe7239918a7babd0515930d40d0a761" } - - { repo_name: "git-plugin", - repo_url: "https://github.com/edx/git-plugin.git", - package: "git.hpi", - version: "4dc0c5ce7d38855d0ab0d9cef9cd3325917d748b" } +jenkins_custom_plugins: [] jenkins_debian_pkgs: - - openjdk-7-jdk - nginx - git - maven - daemon - python-pycurl + - psmisc # Extra packages need for a specific jenkins instance. JENKINS_EXTRA_PKGS: [] diff --git a/playbooks/roles/jenkins_master/handlers/main.yml b/playbooks/roles/jenkins_master/handlers/main.yml index dc8ac9d7b50..fab33520918 100644 --- a/playbooks/roles/jenkins_master/handlers/main.yml +++ b/playbooks/roles/jenkins_master/handlers/main.yml @@ -1,9 +1,24 @@ --- - name: restart Jenkins - service: name=jenkins state=restarted + service: + name: jenkins + state: restarted + tags: + - manage + - manage:start - name: start nginx - service: name=nginx state=started + service: + name: nginx + state: started + tags: + - manage + - manage:start - name: reload nginx - service: name=nginx state=reloaded + service: + name: nginx + state: reloaded + tags: + - manage + - manage:start diff --git a/playbooks/roles/jenkins_master/meta/main.yml b/playbooks/roles/jenkins_master/meta/main.yml index 2083f0e1251..ab690bf8356 100644 --- a/playbooks/roles/jenkins_master/meta/main.yml +++ b/playbooks/roles/jenkins_master/meta/main.yml @@ -1,3 +1,6 @@ --- dependencies: - common + - nginx + - role: oraclejdk + tags: java diff --git a/playbooks/roles/jenkins_master/tasks/datadog.yml b/playbooks/roles/jenkins_master/tasks/datadog.yml new file mode 100644 index 00000000000..4ebe3d6a317 --- /dev/null +++ b/playbooks/roles/jenkins_master/tasks/datadog.yml @@ -0,0 +1,5 @@ +- name: Enable jenkins datadog + shell: cp /etc/dd-agent/conf.d/jenkins.yaml.example /etc/dd-agent/conf.d/jenkins.yaml + args: + creates: /etc/dd-agent/conf.d/jenkins.yaml + notify: restart the datadog service diff --git a/playbooks/roles/jenkins_master/tasks/main.yml b/playbooks/roles/jenkins_master/tasks/main.yml index c7a2bec1abc..679b1f0f8e5 100644 --- a/playbooks/roles/jenkins_master/tasks/main.yml +++ b/playbooks/roles/jenkins_master/tasks/main.yml @@ -1,121 +1,305 @@ --- +- name: add the mysql signing key + apt_key: + keyserver: "{{ 
jenkins_mysql_apt_keyserver }}" + id: "{{ jenkins_mysql_apt_key }}" + when: ansible_distribution_release == 'focal' and jenkins_install_mysql_5_7|bool + tags: + - install + - install:system-requirements + +- name: add the mysql-5.7 repo to the sources list + apt_repository: + repo: "{{ jenkins_mysql_repo }}" + state: present + when: ansible_distribution_release == 'focal' and jenkins_install_mysql_5_7|bool + tags: + - install + - install:system-requirements -- name: install jenkins specific system packages +- name: Install jenkins specific system packages apt: - pkg={{','.join(jenkins_debian_pkgs)}} - state=present update_cache=yes + name: "{{ jenkins_debian_pkgs }}" + state: present + update_cache: yes tags: - - jenkins + - jenkins + - install + - install:system-requirements -- name: install jenkins extra system packages +- name: Install jenkins extra system packages apt: - pkg={{','.join(JENKINS_EXTRA_PKGS)}} - state=present update_cache=yes + name: "{{ JENKINS_EXTRA_PKGS }}" + state: present + update_cache: yes tags: - - jenkins + - jenkins + - install + - install:system-requirements -- name: create jenkins group - group: name={{ jenkins_group }} state=present +- name: Create jenkins group + group: + name: "{{ jenkins_group }}" + state: present + tags: + - install + - install:system-requirements -- name: add the jenkins user to the group - user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }} +- name: Add the jenkins user to the group + user: + name: "{{ jenkins_user }}" + append: yes + groups: "{{ jenkins_group }}" + tags: + - install + - install:system-requirements # Should be resolved in the next release, but until then we need to do this # https://issues.jenkins-ci.org/browse/JENKINS-20407 -- name: workaround for JENKINS-20407 - command: "mkdir -p /var/run/jenkins" +- name: Workaround for JENKINS-20407 + file: + path: "/var/run/jenkins" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - install + - install:system-requirements +# TODO in Ansible 2.1 we can do apt: deb="{{ jenkins_deb_url }}" - name: download Jenkins package get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}" + tags: + - install + - install:app-requirements - name: install Jenkins package - command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}" + apt: + deb: "/tmp/{{ jenkins_deb }}" + tags: + - install + - install:app-requirements + +- name: Stop Jenkins + service: + name: jenkins + state: stopped + tags: + - manage + - manage:stop + +- name: Set jvm args + lineinfile: + backup: yes + dest: /etc/default/jenkins + regexp: '^JAVA_ARGS=' + line: 'JAVA_ARGS="{{ jenkins_jvm_args }}"' + tags: + - java + - jenkins + - install + - install:app-configuration -- name: stop Jenkins - service: name=jenkins state=stopped +- name: Set jenkins home + lineinfile: + backup: yes + dest: /etc/default/jenkins + regexp: '^JENKINS_HOME=' + line: 'JENKINS_HOME="{{ jenkins_home }}"' + tags: + - java + - jenkins + - install + - install:app-configuration # Move /var/lib/jenkins to Jenkins home (on the EBS) -- name: move /var/lib/jenkins - command: mv /var/lib/jenkins {{ jenkins_home }} - creates={{ jenkins_home }} +- name: Move /var/lib/jenkins + command: "mv /var/lib/jenkins {{ jenkins_home }}" + args: + creates: "{{ jenkins_home }}" + tags: + - install + - install:base -- name: set owner for Jenkins home - file: path={{ jenkins_home }} recurse=yes state=directory - owner={{ jenkins_user }} group={{ jenkins_group }} +- name: Set owner for Jenkins home + file: + path: "{{ 
jenkins_home }}" + recurse: yes + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - install + - install:app-requirements # Symlink /var/lib/jenkins to {{ COMMON_DATA_DIR }}/jenkins # since Jenkins will expect its files to be in /var/lib/jenkins -- name: symlink /var/lib/jenkins - file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link - owner={{ jenkins_user }} group={{ jenkins_group }} - notify: - - restart Jenkins +- name: Symlink /var/lib/jenkins + file: + src: "{{ jenkins_home }}" + dest: /var/lib/jenkins + state: link + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - install + - install:base -- name: make plugins directory - sudo_user: jenkins - shell: mkdir -p {{ jenkins_home }}/plugins +# Using this instead of the user module because the user module +# fails if the directory exists. +- name: Set home directory for jenkins user + shell: "usermod -d {{ jenkins_home }} {{ jenkins_user }}" + tags: + - install + - install:base + +- name: Make plugins directory + file: + path: "{{ jenkins_home }}/plugins" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - install + - install:base + - install:plugins # We first download the plugins to a temp directory and include # the version in the file name. That way, if we increment # the version, the plugin will be updated in Jenkins -- name: download Jenkins plugins - get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi - dest=/tmp/{{ item.name }}_{{ item.version }} - with_items: jenkins_plugins - -- name: install Jenkins plugins - command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi - with_items: jenkins_plugins - -- name: set Jenkins plugin permissions - file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi - owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 - with_items: jenkins_plugins - notify: - - restart Jenkins +- name: Download Jenkins plugins + get_url: + url: "http://updates.jenkins-ci.org/download/plugins/%7B%7B%20item.name%20%7D%7D/%7B%7B%20item.version%20%7D%7D/%7B%7B%20item.name%20%7D%7D.hpi" + dest: "/tmp/{{ item.name }}_{{ item.version }}" + with_items: "{{ jenkins_plugins }}" + register: jenkins_plugin_downloads + tags: + - install + - install:base + - install:plugins + +- name: Install Jenkins plugins + command: "cp {{ item.dest }} {{ jenkins_home }}/plugins/{{ item.item.name }}.hpi" + with_items: "{{ jenkins_plugin_downloads.results }}" + when: item.changed + tags: + - install + - install:base + - install:plugins + +- name: Set Jenkins plugin permissions + file: + path: "{{ jenkins_home }}/plugins/{{ item.item.name }}.hpi" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: "0644" + with_items: "{{ jenkins_plugin_downloads.results }}" + when: item.changed + tags: + - install + - install:base + - install:plugins # We had to fork some plugins to workaround # certain issues. If these changes get merged # upstream, we may be able to use the regular plugin install process. # Until then, we compile and install the forks ourselves. 
-- name: checkout custom plugin repo - git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }} - with_items: jenkins_custom_plugins +- name: Checkout custom plugin repo + git: + repo: "{{ item.repo_url }}" + dest: "/tmp/{{ item.repo_name }}" + version: "{{ item.version }}" + accept_hostkey: yes + with_items: "{{ jenkins_custom_plugins }}" + register: jenkins_custom_plugins_checkout + tags: + - install + - install:base + - install:plugins -- name: compile custom plugins - command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }} - with_items: jenkins_custom_plugins +- name: Compile custom plugins + command: "mvn -Dmaven.test.skip=true install" + args: + chdir: "/tmp/{{ item.item.repo_name }}" + with_items: "{{ jenkins_custom_plugins_checkout.results }}" + when: item.changed + tags: + - install + - install:base + - install:plugins -- name: install custom plugins - command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }} - {{ jenkins_home }}/plugins/{{ item.package }} - with_items: jenkins_custom_plugins - notify: - - restart Jenkins +- name: Install custom plugins + command: mv /tmp/{{ item.item.repo_name }}/target/{{ item.item.package }} + {{ jenkins_home }}/plugins/{{ item.item.package }} + with_items: "{{ jenkins_custom_plugins_checkout.results }}" + when: item.changed + tags: + - install + - install:base + - install:plugins -- name: set custom plugin permissions - file: path={{ jenkins_home }}/plugins/{{ item.package }} - owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 - with_items: jenkins_custom_plugins +- name: Set custom plugin permissions + file: + path: "{{ jenkins_home }}/plugins/{{ item.item.package }}" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: "0700" + with_items: "{{ jenkins_custom_plugins_checkout.results }}" + when: item.changed + tags: + - install + - install:base + - install:plugins + +- name: Create directory to hold job virtualenvs + file: + path: "{{ jenkins_job_venv_dir }}" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + when: jenkins_job_venv_dir is defined + tags: + - install + - install:venv-management +- name: Copy virtualenv script tools to jenkins + copy: + remote_src: no + src: "{{ jenkins_venv_src_dir }}/virtualenv_tools.sh" + dest: "{{ jenkins_job_venv_dir }}/virtualenv_tools.sh" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + when: jenkins_job_venv_dir is defined + tags: + - install + - install:venv-management # Plugins that are bundled with Jenkins are "pinned". # Jenkins will overwrite updated plugins with its built-in version # unless we create a ".pinned" file for the plugin. 
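# (Editor's note.) A pin file is just an empty marker named after the plugin, # e.g. {{ jenkins_home }}/plugins/git.jpi.pinned for a hypothetical bundled "git" plugin; # the task below touches one such marker per entry in jenkins_bundled_plugins.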
# See https://issues.jenkins-ci.org/browse/JENKINS-13129 -- name: create plugin pin files - command: touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned - creates={{ jenkins_home }}/plugins/{{ item }}.jpi.pinned - with_items: jenkins_bundled_plugins +- name: Create plugin pin files + command: "touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned" + args: + creates: "{{ jenkins_home }}/plugins/{{ item }}.jpi.pinned" + with_items: "{{ jenkins_bundled_plugins }}" + tags: + - install + - install:base + - install:plugins -- name: setup nginix vhost - template: - src=etc/nginx/sites-available/jenkins.j2 - dest=/etc/nginx/sites-available/jenkins +- include: datadog.yml + when: COMMON_ENABLE_DATADOG + tags: + - datadog + - install + - install:base -- name: enable jenkins vhost - file: - src=/etc/nginx/sites-available/jenkins - dest=/etc/nginx/sites-enabled/jenkins - state=link - notify: start nginx +- name: Restart Jenkins + service: + name: jenkins + state: restarted + tags: + - manage + - manage:start diff --git a/playbooks/roles/jenkins_master/templates/etc/nginx/sites-available/jenkins.j2 b/playbooks/roles/jenkins_master/templates/etc/nginx/sites-available/jenkins.j2 deleted file mode 100644 index ec9349ca40c..00000000000 --- a/playbooks/roles/jenkins_master/templates/etc/nginx/sites-available/jenkins.j2 +++ /dev/null @@ -1,25 +0,0 @@ -server { - listen 80; - server_name {{ jenkins_server_name }}; - - location / { - proxy_pass http://localhost:{{ jenkins_port }}; - - # Rewrite HTTPS requests from WAN to HTTP requests on LAN - proxy_redirect http:// https://; - - # The following settings from https://wiki.jenkins-ci.org/display/JENKINS/Running+Hudson+behind+Nginx - sendfile off; - - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_connect_timeout 150; - proxy_send_timeout 100; - proxy_read_timeout 100; - proxy_buffers 4 32k; - client_max_body_size 8m; - client_body_buffer_size 128k; - - } -} \ No newline at end of file diff --git a/playbooks/roles/jenkins_worker/defaults/main.yml b/playbooks/roles/jenkins_worker/defaults/main.yml deleted file mode 100644 index aaa72986405..00000000000 --- a/playbooks/roles/jenkins_worker/defaults/main.yml +++ /dev/null @@ -1,117 +0,0 @@ ---- -jenkins_user: "jenkins" -jenkins_group: "jenkins" -jenkins_home: /home/jenkins - -# System packages -jenkins_debian_pkgs: - - build-essential - - gfortran - - graphviz - - libgraphviz-dev - - libopenblas-dev - - liblapack-dev - - libmysqlclient-dev - - libxml2-dev - - libgeos-dev - - libxslt1-dev - - npm - - pkg-config - - gettext - -# Ruby Specific Vars -jenkins_rbenv_root: "{{ jenkins_home }}/.rbenv" -jenkins_ruby_version: "1.9.3-p374" - -# JSCover direct download URL -jscover_url: "http://files.edx.org/testeng/JSCover-1.0.2.zip" -jscover_version: "1.0.2" - -# Python -jenkins_venv: "{{ jenkins_home }}/wheel_venv" -jenkins_pip: "{{ jenkins_venv }}/bin/pip" -jenkins_wheel_dir: "{{ jenkins_home }}/wheelhouse" -jenkins_wheels: - - { pkg: "numpy==1.6.2", wheel: "numpy-1.6.2-cp27-none-linux_x86_64.whl" } - - { pkg: "django-celery==3.0.17", wheel: "django_celery-3.0.17-py27-none-any.whl" } - - { pkg: "beautifulsoup4==4.1.3", wheel: "beautifulsoup4-4.1.3-py27-none-any.whl"} - - { pkg: "beautifulsoup==3.2.1", wheel: "BeautifulSoup-3.2.1-py27-none-any.whl" } - - { pkg: "bleach==1.2.2", wheel: "bleach-1.2.2-py27-none-any.whl" } - - { pkg: "html5lib==0.95", wheel: "html5lib-0.95-py27-none-any.whl" } - - { pkg: "boto==2.13.3",
wheel: "boto-2.13.3-py27-none-any.whl" } - - { pkg: "celery==3.0.19", wheel: "celery-3.0.19-py27-none-any.whl" } - - { pkg: "dealer==0.2.3", wheel: "dealer-0.2.3-py27-none-any.whl" } - - { pkg: "django-countries==1.5", wheel: "django_countries-1.5-py27-none-any.whl" } - - { pkg: "django-filter==0.6.0", wheel: "django_filter-0.6-py27-none-any.whl" } - - { pkg: "django-followit==0.0.3", wheel: "django_followit-0.0.3-py27-none-any.whl" } - - { pkg: "django-kombu==0.9.4", wheel: "kombu-2.5.16-py27-none-any.whl" } - - { pkg: "django-mako==0.1.5pre", wheel: "django_mako-0.1.5pre-py27-none-any.whl" } - - { pkg: "django-model-utils==1.4.0", wheel: "django_model_utils-1.4.0-py27-none-any.whl" } - - { pkg: "django-masquerade==0.1.6", wheel: "django_masquerade-0.1.6-py27-none-any.whl" } - - { pkg: "django-mptt==0.5.5", wheel: "django_mptt-0.5.5-py27-none-any.whl" } - - { pkg: "django-openid-auth==0.4", wheel: "python_openid-2.2.5-py27-none-any.whl" } - - { pkg: "django-robots==0.9.1", wheel: "django_robots-0.9.1-py27-none-any.whl" } - - { pkg: "django-sekizai==0.6.1", wheel: "django_sekizai-0.6.1-py27-none-any.whl" } - - { pkg: "django-ses==0.4.1", wheel: "django_ses-0.4.1-py27-none-any.whl" } - - { pkg: "django-storages==1.1.5", wheel: "django_storages-1.1.5-py27-none-any.whl" } - - { pkg: "django-method-override==0.1.0", wheel: "django_method_override-0.1.0-py27-none-any.whl" } - - { pkg: "djangorestframework==2.3.5", wheel: "djangorestframework-2.3.5-py27-none-any.whl" } - - { pkg: "django==1.4.8", wheel: "Django-1.4.8-py27-none-any.whl" } - - { pkg: "feedparser==5.1.3", wheel: "feedparser-5.1.3-py27-none-any.whl" } - - { pkg: "fs==0.4.0", wheel: "fs-0.4.0-py27-none-any.whl" } - - { pkg: "GitPython==0.3.2.RC1", wheel: "GitPython-0.3.2.RC1-py27-none-any.whl" } - - { pkg: "glob2==0.3", wheel: "glob2-0.3-py27-none-any.whl" } - - { pkg: "gunicorn==0.17.4", wheel: "gunicorn-0.17.4-py27-none-any.whl" } - - { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" } - - { pkg: "lxml==3.0.1", wheel: "lxml-3.0.1-cp27-none-linux_x86_64.whl" } - - { pkg: "mako==0.7.3", wheel: "Mako-0.7.3-py27-none-any.whl" } - - { pkg: "Markdown==2.2.1", wheel: "Markdown-2.2.1-py27-none-any.whl" } - - { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" } - - { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" } - - { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" } - - { pkg: "paramiko==1.9.0", wheel: "paramiko-1.9.0-py27-none-any.whl" } - - { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" } - - { pkg: "Pillow==1.7.8", wheel: "Pillow-1.7.8-cp27-none-linux_x86_64.whl" } - - { pkg: "polib==1.0.3", wheel: "polib-1.0.3-py27-none-any.whl" } - - { pkg: "pycrypto>=2.6", wheel: "pycrypto-2.6.1-cp27-none-linux_x86_64.whl" } - - { pkg: "pygments==1.6", wheel: "Pygments-1.6-py27-none-any.whl" } - - { pkg: "pygraphviz==1.1", wheel: "pygraphviz-1.1-cp27-none-linux_x86_64.whl" } - - { pkg: "pymongo==2.4.1", wheel: "pymongo-2.4.1-cp27-none-linux_x86_64.whl" } - - { pkg: "pyparsing==1.5.6", wheel: "pyparsing-1.5.6-py27-none-any.whl" } - - { pkg: "python-memcached==1.48", wheel: "python_memcached-1.48-py27-none-any.whl" } - - { pkg: "python-openid==2.2.5", wheel: "django_openid_auth-0.4-py27-none-any.whl" } - - { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" } - - { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" } - - { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" } - - { pkg: "requests==1.2.3", wheel: "requests-1.2.3-py27-none-any.whl" } - 
- { pkg: "scipy==0.11.0", wheel: "scipy-0.11.0-cp27-none-linux_x86_64.whl" } - - { pkg: "Shapely==1.2.16", wheel: "Shapely-1.2.16-cp27-none-linux_x86_64.whl" } - - { pkg: "singledispatch==3.4.0.2", wheel: "singledispatch-3.4.0.2-py27-none-any.whl" } - - { pkg: "sorl-thumbnail==11.12", wheel: "sorl_thumbnail-11.12-py27-none-any.whl" } - - { pkg: "South==0.7.6", wheel: "South-0.7.6-py27-none-any.whl" } - - { pkg: "sympy==0.7.1", wheel: "sympy-0.7.1-py27-none-any.whl" } - - { pkg: "xmltodict==0.4.1", wheel: "xmltodict-0.4.1-py27-none-any.whl" } - - { pkg: "django-ratelimit-backend==0.6", wheel: "django_ratelimit_backend-0.6-py27-none-any.whl" } - - { pkg: "ipython==0.13.1", wheel: "ipython-0.13.1-py27-none-any.whl" } - - { pkg: "watchdog==0.6.0", wheel: "watchdog-0.6.0-py27-none-any.whl" } - - { pkg: "dogapi==1.2.1", wheel: "dogapi-1.2.1-py27-none-any.whl" } - - { pkg: "newrelic==2.4.0.4", wheel: "newrelic-2.4.0.4-cp27-none-linux_x86_64.whl" } - - { pkg: "sphinx==1.1.3", wheel: "Sphinx-1.1.3-py27-none-any.whl" } - - { pkg: "Babel==1.3", wheel: "Babel-1.3-py27-none-any.whl" } - - { pkg: "transifex-client==0.9.1", wheel: "transifex_client-0.9.1-py27-none-any.whl" } - - { pkg: "coverage==3.6", wheel: "coverage-3.6-cp27-none-linux_x86_64.whl" } - - { pkg: "factory_boy==2.0.2", wheel: "factory_boy-2.0.2-py27-none-any.whl" } - - { pkg: "mock==1.0.1", wheel: "mock-1.0.1-py27-none-any.whl" } - - { pkg: "nosexcover==1.0.7", wheel: "nosexcover-1.0.7-py27-none-any.whl" } - - { pkg: "pep8==1.4.5", wheel: "pep8-1.4.5-py27-none-any.whl" } - - { pkg: "pylint==0.28", wheel: "pylint-0.28.0-py27-none-any.whl" } - - { pkg: "rednose==0.3", wheel: "rednose-0.3-py27-none-any.whl" } - - { pkg: "selenium==2.34.0", wheel: "selenium-2.34.0-py27-none-any.whl" } - - { pkg: "splinter==0.5.4", wheel: "splinter-0.5.4-py27-none-any.whl" } - - { pkg: "django_nose==1.1", wheel: "django_nose-1.1-py27-none-any.whl" } - - { pkg: "django_debug_toolbar", wheel: "django_debug_toolbar-0.10.2-py2.py3-none-any.whl" } - - { pkg: "django-debug-toolbar-mongo", wheel: "django_debug_toolbar_mongo-0.1.10-py27-none-any.whl" } - - { pkg: "nose-ignore-docstring", wheel: "nose_ignore_docstring-0.2-py27-none-any.whl" } - - { pkg: "nose-exclude", wheel: "nose_exclude-0.1.10-py27-none-any.whl" } - - { pkg: "django-crum==0.5", wheel: "django_crum-0.5-py27-none-any.whl" } - - { pkg: "MySQL-python==1.2.4", wheel: "MySQL_python-1.2.4-cp27-none-linux_x86_64.whl" } diff --git a/playbooks/roles/jenkins_worker/meta/main.yml b/playbooks/roles/jenkins_worker/meta/main.yml deleted file mode 100644 index f7f2be2a2a4..00000000000 --- a/playbooks/roles/jenkins_worker/meta/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -dependencies: - - role: rbenv - rbenv_user: "{{ jenkins_user }}" - rbenv_dir: "{{ jenkins_home }}" - rbenv_ruby_version: "{{ jenkins_ruby_version }}" diff --git a/playbooks/roles/jenkins_worker/tasks/jscover.yml b/playbooks/roles/jenkins_worker/tasks/jscover.yml deleted file mode 100644 index f6bc08f2f98..00000000000 --- a/playbooks/roles/jenkins_worker/tasks/jscover.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Install Java - apt: pkg=openjdk-7-jre-headless state=present - -- name: Download JSCover - get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip - -- name: Unzip JSCover - shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover - creates=/var/tmp/jscover - -- name: Install JSCover JAR - command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ jscover_version }}.jar - creates=/usr/local/bin/JSCover-all-{{ 
jscover_version }}.jar - -- name: Set JSCover permissions - file: path="/usr/local/bin/JSCover-all-{{ jscover_version }}.jar" state=file - owner=root group=root mode=0755 diff --git a/playbooks/roles/jenkins_worker/tasks/main.yml b/playbooks/roles/jenkins_worker/tasks/main.yml deleted file mode 100644 index 36b672f70d0..00000000000 --- a/playbooks/roles/jenkins_worker/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# jenkins -# -# Provision a Jenkins instance. -# -# Parameters: -# `jenkins_user`: jenkins -# `jenkins_home`: /var/lib/jenkins -# `jenkins_user_home`: /home/jenkins - -- include: system.yml -- include: python.yml -- include: jscover.yml diff --git a/playbooks/roles/jenkins_worker/tasks/python.yml b/playbooks/roles/jenkins_worker/tasks/python.yml deleted file mode 100644 index 658fc7f5fda..00000000000 --- a/playbooks/roles/jenkins_worker/tasks/python.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- - -# Install scripts requiring a GitHub OAuth token -- name: Install requests Python library - pip: name=requests state=present - -- fail: OAuth token not defined - when: github_oauth_token is not defined - -- name: Install Python GitHub PR auth script - template: src="http://github.com/github_pr_auth.py.j2" dest="/usr/local/bin/github_pr_auth.py" - owner=root group=root - mode=755 - -- name: Install Python GitHub post status script - template: src="http://github.com/github_post_status.py.j2" dest="/usr/local/bin/github_post_status.py" - owner=root group=root - mode=755 - -# Create wheelhouse to enable fast virtualenv creation -- name: Create wheel virtualenv - command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }} - sudo_user: "{{ jenkins_user }}" - -- name: Install wheel - pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv - sudo_user: "{{ jenkins_user }}" - -- name: Create wheelhouse dir - file: - path={{ jenkins_wheel_dir }} state=directory - owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 - -# (need to install each one in the venv to satisfy dependencies) -- name: Create wheel archives - shell: - "{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" && - {{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\" - creates={{ jenkins_wheel_dir }}/${item.wheel}" - sudo_user: "{{ jenkins_user }}" - with_items: jenkins_wheels - -- name: Add wheel_venv.sh script - template: - src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh - owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 diff --git a/playbooks/roles/jenkins_worker/tasks/system.yml b/playbooks/roles/jenkins_worker/tasks/system.yml deleted file mode 100644 index 9722b6d19ef..00000000000 --- a/playbooks/roles/jenkins_worker/tasks/system.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: Create jenkins group - group: name={{ jenkins_group }} state=present - -# The Jenkins account needs a login shell because Jenkins uses scp -- name: Add the jenkins user to the group and configure shell - user: name={{ jenkins_user }} append=yes group={{ jenkins_group }} shell=/bin/bash - -# Because of a bug in the latest release of the EC2 plugin -# we need to use a key generated by Amazon (not imported) -# To satisfy this, we allow users to log in as Jenkins -# using the same keypair the instance was started with. 
-- name: Create .ssh directory - file: - path={{ jenkins_home }}/.ssh state=directory - owner={{ jenkins_user }} group={{ jenkins_group }} - ignore_errors: yes - -- name: Copy ssh keys for jenkins - command: cp /home/ubuntu/.ssh/authorized_keys /home/{{ jenkins_user }}/.ssh/authorized_keys - ignore_errors: yes - -- name: Set key permissions - file: - path={{ jenkins_home }}/.ssh/authorized_keys - owner={{ jenkins_user }} group={{ jenkins_group }} mode=400 - ignore_errors: yes - -- name: Install system packages - apt: pkg={{','.join(jenkins_debian_pkgs)}} - state=present update_cache=yes - -- name: Add script to set up environment variables - template: - src=jenkins_env.j2 dest={{ jenkins_home }}/jenkins_env - owner={{ jenkins_user }} group={{ jenkins_group }} mode=0500 - -# Need to add Github to known_hosts to avoid -# being prompted when using git through ssh -- name: Add github.com to known_hosts if it does not exist - shell: > - ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts diff --git a/playbooks/roles/jenkins_worker/templates/github_post_status.py.j2 b/playbooks/roles/jenkins_worker/templates/github_post_status.py.j2 deleted file mode 100644 index e04311d8dda..00000000000 --- a/playbooks/roles/jenkins_worker/templates/github_post_status.py.j2 +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python - -""" -Update the status of a GitHub commit. -""" - -import sys -import requests -import json -from textwrap import dedent - -# The Ansible script will fill in the GitHub OAuth token. -# That way, we can give the jenkins user on the worker -# execute-only access to this script, ensuring that -# the jenkins user cannot retrieve the token. -GITHUB_OAUTH_TOKEN = "{{ github_oauth_token }}" - -USAGE = "Usage: {0} ORG REPO SHA STATUS TARGET_URL DESCRIPTION" - -VALID_STATUS_LIST = ['pending', 'success', 'error', 'failure'] - - -def parse_args(arg_list): - """ - Parse the list of arguments, returning a dict. - Prints an error message and exits if the arguments are invalid. - """ - if len(arg_list) != 7: - print USAGE.format(arg_list[0]) - exit(1) - - # Check that the build status is valid - status = arg_list[4] - if not status in VALID_STATUS_LIST: - print "Invalid status: must be one of {0}".format(", ".join(VALID_STATUS_LIST)) - exit(1) - - return { - 'org': arg_list[1], - 'repo': arg_list[2], - 'sha': arg_list[3], - 'status': arg_list[4], - 'target_url': arg_list[5], - 'description': arg_list[6] - } - - -def post_status(org, repo, sha, status, target_url, description): - """ - Post a new status to GitHub. - See http://developer.github.com/v3/repos/statuses/ for details. - - Prints an error message and exits if unsuccessful. - """ - url = "https://api.github.com/repos/%7B0%7D/%7B1%7D/statuses/%7B2%7D?access_token={3}".format( - org, repo, sha, GITHUB_OAUTH_TOKEN - ) - - params = { - 'state': status, - 'target_url': target_url, - 'description': description - } - - response = requests.post(url, data=json.dumps(params)) - - if response.status_code != 201: - print dedent(""" - Could not post status: - HTTP response code is {0} - Content: {1} - """).format(response.status_code, response.text).strip() - exit(1) - - -def main(): - """ - Post the status to GitHub. - """ - if not GITHUB_OAUTH_TOKEN: - print "No GitHub Oauth token configured." 
- exit(1) - - arg_dict = parse_args(sys.argv) - post_status( - arg_dict['org'], arg_dict['repo'], - arg_dict['sha'], arg_dict['status'], - arg_dict['target_url'], arg_dict['description'] - ) - - -if __name__ == "__main__": - main() diff --git a/playbooks/roles/jenkins_worker/templates/github_pr_auth.py.j2 b/playbooks/roles/jenkins_worker/templates/github_pr_auth.py.j2 deleted file mode 100644 index e81bcbd4a42..00000000000 --- a/playbooks/roles/jenkins_worker/templates/github_pr_auth.py.j2 +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env python - -""" -Determine whether we allow a GitHub PR to be -built automatically. Checks a whitelist -of repo owners and compares to the HEAD -repo of the pull request. - -Uses an environment variable `GITHUB_OWNER_WHITELIST` -to check whether the owner of the PR repo is whitelisted. -This is a comma-separated list of organizations and -users. For example, a bash script might define: - - export GITHUB_OWNER_WHITELIST="edx,a_user,another_user" - -to allow PRs from repos owned by "edx", "a_usr", and "another_user" -""" - -import sys -import os -import requests -from textwrap import dedent - -# The Ansible script will fill in the GitHub OAuth token. -# That way, we can give the jenkins user on the worker -# execute-only access to this script, ensuring that -# the jenkins user cannot retrieve the token. -GITHUB_OAUTH_TOKEN = "{{ github_oauth_token }}" - -USAGE = "Usage: {0} ORG REPO PULL_REQUEST_NUM" - - -def parse_args(arg_list): - """ - Parse the list of arguments, returning a dict of the form - - { - 'org': GITHUB_ORG, - 'repo': GITHUB_REPO, - 'pr_num': GITHUB_PR_NUM - } - - Prints an error message and exits if the arguments are invalid. - """ - if len(arg_list) != 4: - print USAGE.format(arg_list[0]) - exit(1) - - # Retrieve the PR number and check that it's an integer - try: - pr_num = int(arg_list[3]) - except TypeError: - print "'{0}' is not a number".format(arg_list[3]) - - return { - 'org': arg_list[1], - 'repo': arg_list[2], - 'pr_num': pr_num - } - - -def pr_repo_owner(org, repo, pr_num): - """ - Return the name of the owner of the repo from the - HEAD of the PR. - """ - # Query GitHub for information about the pull request - url = "https://api.github.com/repos/%7B0%7D/%7B1%7D/pulls/%7B2%7D?access_token={3}".format( - org, repo, pr_num, GITHUB_OAUTH_TOKEN - ) - response = requests.get(url) - - if response.status_code != 200: - print dedent(""" - Could not retrieve info for pull request #{0}. - HTTP status code: {1} - """.format(pr_num, response.status_code)).strip() - exit(1) - - # Parse the response as json - try: - pr_data = response.json() - except TypeError: - print "Could not parse info for pull request #{0}".format(pr_num) - exit(1) - - # Retrieve the owner of the repo - try: - return pr_data['head']['repo']['owner']['login'] - - except KeyError: - print "Could not get repo owner from PR info" - exit(1) - - -def main(): - """ - Exits with code 0 (success) if the PR is from a whitelisted - repo; otherwise, exits with status 1 (failure). - """ - if not GITHUB_OAUTH_TOKEN: - print "No GitHub Oauth token configured." - exit(1) - - arg_dict = parse_args(sys.argv) - owner = pr_repo_owner(arg_dict['org'], arg_dict['repo'], arg_dict['pr_num']) - - # Check that the owner is whitelisted - whitelist_owners = os.environ.get('GITHUB_OWNER_WHITELIST', '').split(',') - if owner not in whitelist_owners: - print dedent(""" - Owner '{0}' is not in the whitelist. 
- You can update the whitelist by setting the environment variable - `GITHUB_OWNER_WHITELIST` to a comma-separated list of organizations - and users. - """.format(owner)).strip() - exit(1) - - else: - print "Owner '{0}' is authorized".format(owner) - exit(0) - - -if __name__ == "__main__": - main() diff --git a/playbooks/roles/jenkins_worker/templates/jenkins_env.j2 b/playbooks/roles/jenkins_worker/templates/jenkins_env.j2 deleted file mode 100644 index 06fe44d0365..00000000000 --- a/playbooks/roles/jenkins_worker/templates/jenkins_env.j2 +++ /dev/null @@ -1,11 +0,0 @@ -# Configure Ruby -export GEM_ROOT="{{ jenkins_home }}/.gem" -export GEM_HOME="{{ jenkins_home }}/.gem" -export PATH="{{ jenkins_rbenv_root }}/bin:{{jenkins_rbenv_root }}/shims:{{ jenkins_home }}/.gem/bin:$PATH" -export RBENV_ROOT="{{ jenkins_rbenv_root }}" - -# Configure JavaScript coverage -export JSCOVER_JAR=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar - -# Set the display to the virtual frame buffer (Xvfb) -export DISPLAY=:1 diff --git a/playbooks/roles/jenkins_worker/templates/wheel_venv.sh.j2 b/playbooks/roles/jenkins_worker/templates/wheel_venv.sh.j2 deleted file mode 100644 index 9cd82affd04..00000000000 --- a/playbooks/roles/jenkins_worker/templates/wheel_venv.sh.j2 +++ /dev/null @@ -1,17 +0,0 @@ -#! /usr/bin/env bash - -if [ $# -ne 1 ]; then - echo "Usage: $0 VENV_DIR" - exit 1 -fi - -# Create and activate the new virtualenv -VENV=$1 -mkdir -p $VENV -/usr/local/bin/virtualenv $VENV -. $VENV/bin/activate - -# Install each available wheel archive -ls {{ jenkins_wheel_dir }} | cut -d- -f1 | while read line ; do - pip install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} $line ; -done diff --git a/playbooks/roles/jscover/defaults/main.yml b/playbooks/roles/jscover/defaults/main.yml new file mode 100644 index 00000000000..5f8619302ea --- /dev/null +++ b/playbooks/roles/jscover/defaults/main.yml @@ -0,0 +1,10 @@ +--- +# Installs JSCover jar. +# Java is a pre-requisite for JSCover. This role is not responsible +# for installing Java. +# +jscover_role_name: jscover + +# JSCover direct download URL +JSCOVER_VERSION: "1.0.2" +jscover_url: "http://files.edx.org/testeng/JSCover-%7B%7B%20JSCOVER_VERSION%20%7D%7D.zip" diff --git a/playbooks/roles/jscover/tasks/main.yml b/playbooks/roles/jscover/tasks/main.yml new file mode 100644 index 00000000000..bb0a1a74ef9 --- /dev/null +++ b/playbooks/roles/jscover/tasks/main.yml @@ -0,0 +1,17 @@ +--- + +- name: Download JSCover + get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip + +- name: Unzip JSCover + shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover + creates=/var/tmp/jscover + +- name: Install JSCover JAR + command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ JSCOVER_VERSION }}.jar + creates=/usr/local/bin/JSCover-all-{{ JSCOVER_VERSION }}.jar + +- name: Set JSCover permissions + file: path="/usr/local/bin/JSCover-all-{{ JSCOVER_VERSION }}.jar" state=file + owner=root group=root mode=0755 + diff --git a/playbooks/roles/jwt_signature/defaults/main.yml b/playbooks/roles/jwt_signature/defaults/main.yml new file mode 100644 index 00000000000..74dec19ac6f --- /dev/null +++ b/playbooks/roles/jwt_signature/defaults/main.yml @@ -0,0 +1,32 @@ +# Default variables for the jwt_signature role, automatically loaded +# when the role is included. Can be overridden at time of inclusion. +--- + +# Name of the file to store generated JWT signature settings into. 
+# This file will have the form: +# JWT_AUTH: +# JWT_PRIVATE_SIGNING_JWK: ... +# JWT_PUBLIC_SIGNING_JWK_SET: ... +# JWT_SIGNING_ALGORITHM: .. +jwt_signature_file: /tmp/lms_jwt_signature.yml + +# these variables are needed to execute the generate_jwt_signing_key management command. +edxapp_env_path: /edx/app/edxapp/edxapp_env +edxapp_venv_dir: /edx/app/edxapp/venvs/edxapp +edxapp_code_dir: /edx/app/edxapp/edx-platform + +# the application config file that we'll inject JWT_AUTH settings into +app_config_file: /edx/etc/lms.yml + +# template file used to re-render app config +app_config_template: roles/edxapp/templates/lms.yml.j2 + +# which user and group owns the app config file, with what perms +app_config_owner: edxapp +app_config_group: www-data +app_config_mode: 0640 + +# The only play that sets this to true is edxapp - +# whenever the edxapp play is executed, the management +# command that generates an updated public JWK set is run. +CAN_GENERATE_NEW_JWT_SIGNATURE: False diff --git a/playbooks/roles/jwt_signature/tasks/main.yml b/playbooks/roles/jwt_signature/tasks/main.yml new file mode 100644 index 00000000000..7a834678061 --- /dev/null +++ b/playbooks/roles/jwt_signature/tasks/main.yml @@ -0,0 +1,61 @@ +# Generate JWT signature settings (typically only needed on sandboxes) + +- name: create JWT signature settings + shell: . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms generate_jwt_signing_key --output-file {{ jwt_signature_file }} --strip-key-prefix + args: + chdir: "{{ edxapp_code_dir }}" + when: CAN_GENERATE_NEW_JWT_SIGNATURE + +- name: fetch JWT signature settings from host + fetch: + src: "{{ jwt_signature_file }}" + # this will save into /tmp/{{ inventory_hostname }}/{{ jwt_signature_file }} on the machine running Ansible + dest: /tmp + +- name: read JWT signature settings + include_vars: + file: "/tmp/{{ inventory_hostname }}/{{ jwt_signature_file }}" + name: lms_jwt_signature + +- name: fetch app config from host + fetch: + src: "{{ app_config_file }}" + # this will save into /tmp/{{ inventory_hostname }}/{{ app_config_file }} on the machine running Ansible + dest: /tmp + +- name: read app config into variable + include_vars: + file: "/tmp/{{ inventory_hostname }}/{{ app_config_file }}" + name: app_config_vars + +- name: combine app config with jwt_signature config + set_fact: + app_combined_config: '{{ app_config_vars | combine(lms_jwt_signature, recursive=True) }}' + +- name: render app config with jwt signature to yaml file + template: + src: roles/jwt_signature/templates/app_config.yml.j2 + dest: "{{ app_config_file }}" + owner: "{{ app_config_owner }}" + group: "{{ app_config_group }}" + mode: "{{ app_config_mode }}" + +- name: delete JWT signature file on host + file: + path: "/tmp/{{ inventory_hostname }}/{{ jwt_signature_file }}" + state: absent + +- name: delete app config file on host + file: + path: "/tmp/{{ inventory_hostname }}/{{ app_config_file }}" + state: absent + +# The app must be restarted so that the config file variables +# are loaded into the Django settings.
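+# (Editor's note.) The variables referenced below (app_name, supervisor_ctl, +# supervisor_cfg, supervisor_service_user) are not defined in this role's +# defaults; they are assumed to be supplied by the including play or the +# supervisor role.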
+- name: restart the application to load JWT signature settings + supervisorctl: + name: "{{ app_name }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + become_user: "{{ supervisor_service_user }}" diff --git a/playbooks/roles/jwt_signature/templates/app_config.yml.j2 b/playbooks/roles/jwt_signature/templates/app_config.yml.j2 new file mode 100644 index 00000000000..7dc3a5c3d8d --- /dev/null +++ b/playbooks/roles/jwt_signature/templates/app_config.yml.j2 @@ -0,0 +1,3 @@ +{% if app_combined_config %} +{{ app_combined_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/kibana/defaults/main.yml b/playbooks/roles/kibana/defaults/main.yml new file mode 100644 index 00000000000..b688dc84f38 --- /dev/null +++ b/playbooks/roles/kibana/defaults/main.yml @@ -0,0 +1,8 @@ +--- +KIBANA_SERVER_NAME: "192.168.33.10" +KIBANA_NGINX_PORT: 80 +KIBANA_SSL_NGINX_PORT: 443 + +kibana_app_dir: /edx/app/kibana +kibana_file: kibana-3.0.0.tar.gz +kibana_url: "https://download.elasticsearch.org/kibana/kibana/%7B%7B%20kibana_file%20%7D%7D" diff --git a/playbooks/roles/kibana/files/sample_dashboard.json b/playbooks/roles/kibana/files/sample_dashboard.json new file mode 100644 index 00000000000..9ddcd1abc2e --- /dev/null +++ b/playbooks/roles/kibana/files/sample_dashboard.json @@ -0,0 +1,562 @@ +{ + "title": "edX Log Analysis", + "services": { + "query": { + "idQueue": [], + "list": { + "0": { + "query": "@message: WARNING", + "alias": "", + "color": "#EAB839", + "id": 0, + "pin": false, + "type": "lucene", + "enable": true + }, + "1": { + "id": 1, + "color": "#7EB26D", + "query": "@message: INFO", + "alias": "", + "pin": false, + "type": "lucene", + "enable": true + }, + "2": { + "id": 2, + "color": "#BF1B00", + "query": "@message: ERROR", + "alias": "", + "pin": false, + "type": "lucene", + "enable": true + }, + "3": { + "id": 3, + "color": "#F9D9F9", + "query": "*", + "alias": "", + "pin": false, + "type": "lucene", + "enable": true + } + }, + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "filter": { + "idQueue": [ + 1, + 2, + 3 + ], + "list": { + "0": { + "type": "time", + "field": "@timestamp", + "from": "now-1h", + "to": "now", + "mandate": "must", + "active": true, + "alias": "", + "id": 0 + }, + "1": { + "type": "querystring", + "query": "*pika*", + "mandate": "mustNot", + "active": true, + "alias": "", + "id": 1 + }, + "2": { + "type": "querystring", + "query": "*connectionpool*", + "mandate": "mustNot", + "active": true, + "alias": "", + "id": 3 + } + }, + "ids": [ + 0, + 1, + 2 + ] + } + }, + "rows": [ + { + "title": "Graph", + "height": "350px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "span": 12, + "editable": true, + "group": [ + "default" + ], + "type": "histogram", + "mode": "count", + "time_field": "@timestamp", + "value_field": null, + "auto_int": true, + "resolution": 100, + "interval": "30s", + "fill": 3, + "linewidth": 3, + "timezone": "browser", + "spyable": true, + "zoomlinks": true, + "bars": false, + "stack": true, + "points": false, + "lines": true, + "legend": true, + "x-axis": true, + "y-axis": true, + "percentage": false, + "interactive": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "title": "Events over time", + "intervals": [ + "auto", + "1s", + "1m", + "5m", + "10m", + "30m", + "1h", + "3h", + "12h", + "1d", + "1w", + "1M", + "1y" + ], + "options": true, + "tooltip": { + "value_type": "cumulative", + "query_as_alias": true + }, + "scale": 1, + 
"y_format": "none", + "grid": { + "max": null, + "min": 0 + }, + "annotate": { + "enable": false, + "query": "*", + "size": 20, + "field": "_type", + "sort": [ + "_score", + "desc" + ] + }, + "pointradius": 5, + "show_query": true, + "legend_counts": true, + "zerofill": true, + "derivative": false + } + ], + "notice": false + }, + { + "title": "Charts", + "height": "250px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "span": 4, + "editable": true, + "type": "hits", + "loadingEditor": false, + "query": { + "field": "syslog_severity", + "goal": 100 + }, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "size": 10, + "exclude": [], + "donut": true, + "tilt": true, + "legend": "above", + "labels": true, + "mode": "terms", + "default_field": "DEFAULT", + "spyable": true, + "title": "Log Severity", + "style": { + "font-size": "10pt" + }, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above" + }, + { + "span": 4, + "editable": true, + "type": "hits", + "loadingEditor": false, + "query": { + "field": "@source_host", + "goal": 100 + }, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "size": 10, + "exclude": [], + "donut": true, + "tilt": true, + "legend": "above", + "labels": true, + "mode": "terms", + "default_field": "DEFAULT", + "spyable": true, + "title": "Logs by Host", + "style": { + "font-size": "10pt" + }, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above" + }, + { + "span": 4, + "editable": true, + "type": "hits", + "loadingEditor": false, + "style": { + "font-size": "10pt" + }, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above", + "donut": true, + "tilt": true, + "labels": true, + "spyable": true, + "queries": { + "mode": "selected", + "ids": [ + 0, + 1, + 2 + ] + }, + "title": "Percent by Python Severity" + } + ], + "notice": false + }, + { + "title": "Trends", + "height": "50px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "span": 4, + "editable": true, + "type": "trends", + "loadingEditor": false, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "style": { + "font-size": "14pt" + }, + "ago": "1h", + "arrangement": "vertical", + "spyable": true, + "title": "Hourly" + }, + { + "span": 4, + "editable": true, + "type": "trends", + "loadingEditor": false, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "style": { + "font-size": "14pt" + }, + "ago": "1d", + "arrangement": "vertical", + "spyable": true, + "title": "Daily" + }, + { + "span": 4, + "editable": true, + "type": "trends", + "loadingEditor": false, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "style": { + "font-size": "14pt" + }, + "ago": "1w", + "arrangement": "vertical", + "spyable": true, + "title": "Weekly" + } + ], + "notice": false + }, + { + "title": "Error Events", + "height": "550px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "error": false, + "span": 12, + "editable": true, + "type": "table", + "loadingEditor": false, + "status": "Stable", + "queries": { + "mode": "selected", + "ids": [ + 2 + ] + }, + "size": 100, + "pages": 5, + "offset": 0, + "sort": [ + "@timestamp", + "desc" + ], + "group": "default", + "style": { + "font-size": "9pt" + }, + "overflow": "min-height", + "fields": [ + "@timestamp", + "@source_host", + "message" + ], + "highlight": [], + "sortable": true, + "header": true, + "paging": true, + 
"field_list": true, + "all_fields": false, + "trimFactor": 300, + "normTimes": true, + "spyable": true, + "title": "Errors", + "localTime": false, + "timeField": "@timestamp" + } + ], + "notice": false + }, + { + "title": "Events", + "height": "350px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "title": "All events", + "error": false, + "span": 12, + "editable": true, + "group": [ + "default" + ], + "type": "table", + "size": 100, + "pages": 5, + "offset": 0, + "sort": [ + "@timestamp", + "desc" + ], + "style": { + "font-size": "9pt" + }, + "overflow": "min-height", + "fields": [ + "@source_host", + "message" + ], + "highlight": [], + "sortable": true, + "header": true, + "paging": true, + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "field_list": true, + "status": "Stable", + "trimFactor": 300, + "normTimes": true, + "all_fields": false, + "localTime": false, + "timeField": "@timestamp" + } + ], + "notice": false + } + ], + "editable": true, + "failover": false, + "index": { + "interval": "day", + "pattern": "[logstash-]YYYY.MM.DD", + "default": "NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED", + "warm_fields": true + }, + "style": "dark", + "panel_hints": true, + "pulldowns": [ + { + "type": "query", + "collapse": false, + "notice": false, + "query": "*", + "pinned": true, + "history": [ + "*", + "@message: ERROR", + "@message: INFO", + "@message: WARNING", + "@message: WARN", + "*corresponding*", + "@message: INFO OR syslog_severity: info", + "@message: INFO OR @log_severity: info", + "ERROR", + "WARNING" + ], + "remember": 10, + "enable": true + }, + { + "type": "filtering", + "collapse": true, + "notice": false, + "enable": true + } + ], + "nav": [ + { + "type": "timepicker", + "collapse": false, + "notice": false, + "status": "Stable", + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "timefield": "@timestamp", + "now": true, + "filter_id": 0, + "enable": true + } + ], + "loader": { + "save_gist": false, + "save_elasticsearch": true, + "save_local": true, + "save_default": true, + "save_temp": true, + "save_temp_ttl_enable": true, + "save_temp_ttl": "30d", + "load_gist": true, + "load_elasticsearch": true, + "load_elasticsearch_size": 20, + "load_local": true, + "hide": false + }, + "refresh": "1m" +} \ No newline at end of file diff --git a/playbooks/roles/kibana/handlers/main.yml b/playbooks/roles/kibana/handlers/main.yml new file mode 100644 index 00000000000..81b7f6b7092 --- /dev/null +++ b/playbooks/roles/kibana/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart nginx + service: name=nginx state=restarted + +- name: reload nginx + service: name=nginx state=reloaded diff --git a/playbooks/roles/kibana/meta/default.yml b/playbooks/roles/kibana/meta/default.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/kibana/meta/default.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/kibana/tasks/main.yml b/playbooks/roles/kibana/tasks/main.yml new file mode 100644 index 00000000000..65f634ebd18 --- /dev/null +++ b/playbooks/roles/kibana/tasks/main.yml @@ -0,0 +1,53 @@ +# requires: +# - oraclejdk +# - elasticsearch +# - nginx +--- + +- name: Ensure app apt dependencies are installed + apt: pkg={{ item }} state=installed + with_items: + - 
python-software-properties + - git + - nginx + +- name: Ensure {{ kibana_app_dir }} exists + file: + path: "{{ kibana_app_dir }}" + state: directory + owner: root + group: root + mode: 0755 + +- name: Ensure subdirectories exist + file: + path: "{{ kibana_app_dir }}/{{ item }}" + owner: root + group: root + mode: 0755 + state: directory + with_items: + - htdocs + - share + +- name: ensure we have the specified kibana release + get_url: + url: "{{ kibana_url }}" + dest: "{{ kibana_app_dir }}/share/{{ kibana_file }}" + +- name: extract + shell: "tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}" + args: + chdir: "{{ kibana_app_dir }}/share" + creates: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}" + +- name: install + shell: "cp -R * {{ kibana_app_dir }}/htdocs/" + args: + chdir: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}" + +- name: copy config + template: + src: config.js.j2 + dest: "{{ kibana_app_dir }}/htdocs/config.js" + diff --git a/playbooks/roles/kibana/templates/config.js.j2 b/playbooks/roles/kibana/templates/config.js.j2 new file mode 100644 index 00000000000..31885f69e99 --- /dev/null +++ b/playbooks/roles/kibana/templates/config.js.j2 @@ -0,0 +1,62 @@ +/** + * This is the app's configuration. If you need to configure + * the default dashboard, please see dashboards/default + */ +define(['settings'], +function (Settings) { + + + return new Settings({ + + /** + * URL to your elasticsearch server. You almost certainly don't + * want 'http://localhost:9200/' here, even if Kibana and ES are on + * the same host. + * + * By default this will attempt to reach ES at the same host you have + * elasticsearch installed on. You probably want to set it to the FQDN of your + * elasticsearch host + * @type {String} + */ + //elasticsearch: "http://"+window.location.hostname+":9200", + {% if NGINX_ENABLE_SSL %} + + elasticsearch: "https://{{ KIBANA_SERVER_NAME }}:{{ KIBANA_SSL_NGINX_PORT }}/e", + + {% else %} + + elasticsearch: "http://{{ KIBANA_SERVER_NAME }}:{{ KIBANA_NGINX_PORT }}/e", + + {% endif %} + + /** + * The default ES index to use for storing Kibana-specific objects + * such as stored dashboards + * @type {String} + */ + kibana_index: "kibana-int", + + /** + * Panel modules available. Panels will only be loaded when they are defined in the + * dashboard, but this list is used in the "add panel" interface.
+ * @type {Array} + */ + panel_names: [ + 'histogram', + 'map', + 'table', + 'filtering', + 'timepicker', + 'text', + 'hits', + 'column', + 'trends', + 'bettermap', + 'query', + 'terms', + 'stats', + 'sparklines', + 'goal', + ] + }); +}); diff --git a/playbooks/roles/launch_ec2/tasks/main.yml b/playbooks/roles/launch_ec2/tasks/main.yml index 5e8c8cc9e0e..33ccab3c2a1 100644 --- a/playbooks/roles/launch_ec2/tasks/main.yml +++ b/playbooks/roles/launch_ec2/tasks/main.yml @@ -18,10 +18,10 @@ - name: terminating single instance local_action: - module: ec2_local + module: ec2 state: 'absent' region: "{{ region }}" - instance_ids: ${tag_lookup.instance_ids} + instance_ids: "{{tag_lookup.instance_ids}}" when: terminate_instance == true and tag_lookup.instance_ids|length == 1 - name: deregister instance from an an elb if it was in one @@ -36,17 +36,26 @@ - name: Launch ec2 instance local_action: - module: ec2_local + module: ec2 keypair: "{{ keypair }}" group: "{{ security_group }}" instance_type: "{{ instance_type }}" + instance_initiated_shutdown_behavior: "{{ instance_initiated_shutdown_behavior }}" image: "{{ ami }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + assign_public_ip: yes wait: true region: "{{ region }}" - instance_tags: "{{instance_tags}}" - root_ebs_size: "{{ root_ebs_size }}" + instance_tags: "{{ instance_tags }}" + volumes: + - device_name: /dev/sda1 + volume_size: "{{ root_ebs_size }}" + delete_on_termination: true + volume_type: "gp2" + encrypted: true zone: "{{ zone }}" instance_profile_name: "{{ instance_profile_name }}" + user_data: "{{ user_data }}" register: ec2 - name: Add DNS name @@ -59,9 +68,13 @@ ttl: 300 record: "{{ dns_name }}.{{ dns_zone }}" value: "{{ item.public_dns_name }}" - with_items: ec2.instances + register: task_result + until: task_result is succeeded + retries: 5 + delay: 30 + with_items: "{{ ec2.instances }}" -- name: Add DNS name studio +- name: Add DNS names for services local_action: module: route53 overwrite: yes @@ -69,36 +82,34 @@ zone: "{{ dns_zone }}" type: CNAME ttl: 300 - record: "studio.{{ dns_name }}.{{ dns_zone }}" - value: "{{ item.public_dns_name }}" - with_items: ec2.instances - -- name: Add DNS name preview - local_action: - module: route53 - overwrite: yes - command: create - zone: "{{ dns_zone }}" - type: CNAME - ttl: 300 - record: "preview.{{ dns_name }}.{{ dns_zone }}" - value: "{{ item.public_dns_name }}" - with_items: ec2.instances - + record: "{{ item[1] }}-{{ dns_name }}.{{ dns_zone }}" + value: "{{ item[0].public_dns_name }}" + register: task_result + until: task_result is succeeded + retries: 5 + delay: 30 + with_nested: + - "{{ ec2.instances }}" + - ['studio', 'ecommerce', 'preview', 'discovery', 'credentials', 'veda', 'analytics-api', 'registrar', 'program-console', + 'learner-portal', 'prospectus', 'authn', 'payment', 'license-manager', 'learning', 'enterprise-catalog', 'ora-grading', + 'course-authoring','library-authoring', 'commerce-coordinator', 'edx-exams', 'subscriptions', 'profile', 'learner-dashboard'] - name: Add new instance to host group - local_action: > - add_host - hostname={{ item.public_ip }} - groupname=launched - with_items: ec2.instances + local_action: + module: add_host + hostname: "{{ item.public_ip }}" + groups: launched + with_items: "{{ ec2.instances }}" - name: Wait for SSH to come up - local_action: > - wait_for - host={{ item.public_dns_name }} - state=started - port=22 - delay=60 - timeout=320 - with_items: ec2.instances + local_action: + module: wait_for + host: "{{ item.public_dns_name }}" + 
search_regex: OpenSSH + port: 22 + delay: 10 + with_items: "{{ ec2.instances }}" + +- name: Wait for python to install + pause: + minutes: "{{ launch_ec2_wait_time }}" diff --git a/playbooks/roles/learner_portal/defaults/main.yml b/playbooks/roles/learner_portal/defaults/main.yml new file mode 100644 index 00000000000..751544d2833 --- /dev/null +++ b/playbooks/roles/learner_portal/defaults/main.yml @@ -0,0 +1,42 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# +learner_portal_home: '{{ COMMON_APP_DIR }}/{{ learner_portal_service_name }}' +NVM_DIR: '{{ learner_portal_home }}' +learner_portal_user: 'root' +learner_portal_git_identity: 'none' +edx_django_service_use_python3: false +learner_portal_repo: 'https://github.com/openedx/frontend-app-learner-portal-programs.git' +LEARNER_PORTAL_VERSION: 'master' +learner_portal_service_name: 'learner_portal' +LEARNER_PORTAL_NODE_VERSION: '18.17.0' + +learner_portal_nodeenv_dir: '{{ learner_portal_home }}/nodeenvs/{{ learner_portal_service_name }}' +learner_portal_nodeenv_bin: '{{learner_portal_nodeenv_dir}}/bin' +learner_portal_app_dir: "{{ COMMON_APP_DIR }}/learner_portal" +learner_portal_code_dir: "{{ learner_portal_app_dir }}/learner_portal" +learner_portal_dist_dir: "{{ learner_portal_code_dir }}/dist" +learner_portal_env_vars: + PATH: "{{ learner_portal_nodeenv_bin }}:{{ ansible_env.PATH }}" + NODE_ENV: "production" + ACTIVE_ENV: "production" + BASE_URL: 'https://learner-portal-{{ COMMON_LMS_BASE_URL }}' + LMS_BASE_URL: '{{ COMMON_LMS_BASE_URL }}' + LOGIN_URL: '{{ COMMON_LMS_BASE_URL }}/login' + LOGOUT_URL: '{{ COMMON_LMS_BASE_URL }}/logout' + CSRF_TOKEN_API_PATH: '/csrf/api/v1/token' + REFRESH_ACCESS_TOKEN_ENDPOINT: '{{ COMMON_LMS_BASE_URL }}/login_refresh' + ACCESS_TOKEN_COOKIE_NAME: 'edx-jwt-cookie-header-payload' + USER_INFO_COOKIE_NAME: 'edx-user-info' + DESIGNER_BASE_URL: '' + HOST_NAME: '' + SEGMENT_KEY: '' + MOCK_DATA: true diff --git a/playbooks/roles/learner_portal/meta/main.yml b/playbooks/roles/learner_portal/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/learner_portal/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/learner_portal/tasks/main.yml b/playbooks/roles/learner_portal/tasks/main.yml new file mode 100644 index 00000000000..ab5c386d30f --- /dev/null +++ b/playbooks/roles/learner_portal/tasks/main.yml @@ -0,0 +1,88 @@ +- name: Remove old git repo + file: + state: absent + path: "{{ learner_portal_code_dir }}/" + +- name: Remove old app repo + file: + state: absent + path: "{{ learner_portal_app_dir }}" + +- name: Create learner_portal app folder + file: + path: "{{ learner_portal_app_dir }}" + state: directory + owner: "{{ learner_portal_user }}" + group: "{{ learner_portal_user }}" + +- name: Checkout learner_portal repo into {{ learner_portal_code_dir }} + git: + dest: "{{ learner_portal_code_dir }}" + repo: "{{ learner_portal_repo }}" + version: "{{ LEARNER_PORTAL_VERSION }}" + accept_hostkey: yes + become_user: "{{ learner_portal_user }}" + register: learner_portal_checkout + +- name: Install nodeenv + apt: + name: nodeenv + become_user: "{{ learner_portal_user }}" + environment: "{{ learner_portal_env_vars }}" + tags: + - install + - 
install:system-requirements + +# Install node +- name: Create nodeenv + shell: "nodeenv {{ learner_portal_nodeenv_dir }} --node={{ LEARNER_PORTAL_NODE_VERSION }} --prebuilt --force" + become_user: "{{ learner_portal_user }}" + environment: "{{ learner_portal_env_vars }}" + tags: + - install + - install:system-requirements + +# Set the npm registry +# This needs to be done as root since npm is weird about +# chown - https://github.com/npm/npm/issues/3565 +- name: Set the npm registry + shell: "{{ learner_portal_nodeenv_bin }}/npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'" + args: + creates: "{{ learner_portal_code_dir }}/.npmrc" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements + +# we need to do this so that npm can find a node install to use to build node-sass +- name: prepend node path + shell: "{{ learner_portal_nodeenv_bin }}/npm config set scripts-prepend-node-path true" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements + +# install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +# The version of ansible we are using also does not make use of "--unsafe-perm", which we need for node-sass +- name: Install node dependencies + shell: "sudo {{ learner_portal_nodeenv_bin }}/node {{ learner_portal_nodeenv_bin }}/npm i --unsafe-perm" + args: + chdir: "{{ learner_portal_code_dir }}" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements + +# install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: Run learner_portal build + shell: "npm run build" + args: + chdir: "{{ learner_portal_code_dir }}" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements diff --git a/playbooks/roles/legacy_ora/tasks/main.yml b/playbooks/roles/legacy_ora/tasks/main.yml deleted file mode 100644 index 2c01e2c6a92..00000000000 --- a/playbooks/roles/legacy_ora/tasks/main.yml +++ /dev/null @@ -1,44 +0,0 @@ -# -# Update config for a legacy ora installation. -# -# This role requires that ora_app_dir and ora_user both be defined. -# There is no default for them. -# -- fail: msg="ora_app_dir not defined. eg. /edx/app/ora, /opt/wwc" - when: ora_app_dir is not defined - -- fail: msg="ora_user not defined. eg. ora, www-data" - when: ora_user is not defined - -- fail: msg="COMMON_ENV_TYPE not defined. eg. stage, prod" - when: COMMON_ENV_TYPE is not defined - -- fail: msg="secure_dir not defined. This is a path to the secure ora config file." 
- when: secure_dir is not defined - -- name: create ora application config - copy: - src={{secure_dir}}/files/{{COMMON_ENVIRONMENT}}/legacy_ora/ora.env.json - dest={{ora_app_dir}}/env.json - sudo_user: "{{ ora_user }}" - register: env_state - -- name: create ora auth file - copy: - src={{secure_dir}}/files/{{COMMON_ENVIRONMENT}}/legacy_ora/ora.auth.json - dest={{ora_app_dir}}/auth.json - sudo_user: "{{ ora_user }}" - register: auth_state - -# Restart ORA Services -- name: restart edx-ora - service: - name=edx-ora - state=restarted - when: env_state.changed or auth_state.changed - -- name: restart edx-ora-celery - service: - name=edx-ora-celery - state=restarted - when: env_state.changed or auth_state.changed diff --git a/playbooks/roles/library_authoring/defaults/main.yml b/playbooks/roles/library_authoring/defaults/main.yml new file mode 100644 index 00000000000..e07157b6d70 --- /dev/null +++ b/playbooks/roles/library_authoring/defaults/main.yml @@ -0,0 +1,2 @@ +library_authoring_env_extra: + STUDIO_BASE_URL: 'https://studio-%7B%7Bcommon_deploy_hostname%7D%7D/' \ No newline at end of file diff --git a/playbooks/roles/library_authoring/meta/main.yml b/playbooks/roles/library_authoring/meta/main.yml new file mode 100644 index 00000000000..3b786d6eea7 --- /dev/null +++ b/playbooks/roles/library_authoring/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx \ No newline at end of file diff --git a/playbooks/roles/library_authoring/tasks/main.yml b/playbooks/roles/library_authoring/tasks/main.yml new file mode 100644 index 00000000000..45524e163cc --- /dev/null +++ b/playbooks/roles/library_authoring/tasks/main.yml @@ -0,0 +1,5 @@ +- name: Build Library Authoring MFE + include_role: + name: mfe + vars: + MFE_ENVIRONMENT_EXTRA: '{{ library_authoring_env_extra | default(MFE_DEPLOY_ENVIRONMENT_EXTRA) }}' \ No newline at end of file diff --git a/playbooks/roles/license_manager/defaults/main.yml b/playbooks/roles/license_manager/defaults/main.yml new file mode 100644 index 00000000000..1b356f7c25a --- /dev/null +++ b/playbooks/roles/license_manager/defaults/main.yml @@ -0,0 +1,168 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role license_manager +# + +LICENSE_MANAGER_GIT_IDENTITY: !!null + +LICENSE_MANAGER_ENABLED: True + +# +# vars are namespaced with the module name.
+# +license_manager_service_name: 'license_manager' + +license_manager_user: "{{ license_manager_service_name }}" +license_manager_home: "{{ COMMON_APP_DIR }}/{{ license_manager_service_name }}" +license_manager_app_dir: "{{ COMMON_APP_DIR }}/{{ license_manager_service_name }}" +license_manager_code_dir: "{{ license_manager_app_dir }}/{{ license_manager_service_name }}" +license_manager_venvs_dir: "{{ license_manager_app_dir }}/venvs" +license_manager_venv_dir: "{{ license_manager_venvs_dir }}/license_manager" +license_manager_celery_default_queue: 'license_manager.default' +license_manager_hostname: "license-manager" + +LICENSE_MANAGER_USE_PYTHON38: True + +LICENSE_MANAGER_CELERY_ALWAYS_EAGER: false +LICENSE_MANAGER_CELERY_BROKER_TRANSPORT: '' +LICENSE_MANAGER_CELERY_BROKER_USER: '' +LICENSE_MANAGER_CELERY_BROKER_PASSWORD: '' +LICENSE_MANAGER_CELERY_BROKER_HOSTNAME: '' +LICENSE_MANAGER_CELERY_BROKER_VHOST: '' + +license_manager_environment: + LICENSE_MANAGER_CFG: '{{ COMMON_CFG_DIR }}/{{ license_manager_service_name }}.yml' + +license_manager_gunicorn_port: 18170 + +license_manager_debian_pkgs: [] + +LICENSE_MANAGER_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: 'license-manager.git' + VERSION: '{{ LICENSE_MANAGER_VERSION }}' + DESTINATION: "{{ license_manager_code_dir }}" + SSH_KEY: '{{ LICENSE_MANAGER_GIT_IDENTITY }}' + +LICENSE_MANAGER_NGINX_PORT: '1{{ license_manager_gunicorn_port }}' +LICENSE_MANAGER_SSL_NGINX_PORT: '4{{ license_manager_gunicorn_port }}' + +LICENSE_MANAGER_DEFAULT_DB_NAME: 'license-manager' +LICENSE_MANAGER_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +LICENSE_MANAGER_MYSQL_USER: 'license-manager001' +LICENSE_MANAGER_MYSQL_PASSWORD: 'password' +LICENSE_MANAGER_MYSQL_CONN_MAX_AGE: 60 + +LICENSE_MANAGER_MEMCACHE: [ 'memcache' ] + +LICENSE_MANAGER_DJANGO_SETTINGS_MODULE: 'license_manager.settings.production' +LICENSE_MANAGER_DOMAIN: 'localhost' +LICENSE_MANAGER_URL_ROOT: 'http://{{ LICENSE_MANAGER_DOMAIN }}:{{ LICENSE_MANAGER_NGINX_PORT }}' +LICENSE_MANAGER_API_ROOT: '{{ LICENSE_MANAGER_URL_ROOT }}/api' +LICENSE_MANAGER_LOGOUT_URL: '{{ LICENSE_MANAGER_URL_ROOT }}/logout/' + +LICENSE_MANAGER_LANG: 'en_US.UTF-8' +LICENSE_MANAGER_LANGUAGE_CODE: 'en' +LICENSE_MANAGER_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +LICENSE_MANAGER_SERVICE_USER: 'license_manager_service_user' + +LICENSE_MANAGER_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ license_manager_service_name }}' +LICENSE_MANAGER_MEDIA_ROOT: '{{ LICENSE_MANAGER_DATA_DIR }}/media' +LICENSE_MANAGER_MEDIA_URL: '/api/media/' + +LICENSE_MANAGER_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ LICENSE_MANAGER_MEDIA_ROOT }}' + MEDIA_URL: '{{ LICENSE_MANAGER_MEDIA_URL }}' + +# TODO: Let edx_django_service manage LICENSE_MANAGER_STATIC_ROOT in phase 2. 
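+# (Static assets are collected into this directory by Django's collectstatic when the service is deployed.)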
+LICENSE_MANAGER_STATIC_ROOT: '{{ LICENSE_MANAGER_DATA_DIR }}/staticfiles' +LICENSE_MANAGER_STATIC_URL: '/static/' + +LICENSE_MANAGER_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +LICENSE_MANAGER_CORS_ORIGIN_ALLOW_ALL: false +LICENSE_MANAGER_CORS_ORIGIN_WHITELIST: [] + +LICENSE_MANAGER_CSRF_COOKIE_SECURE: false +LICENSE_MANAGER_CSRF_TRUSTED_ORIGINS: [] + +LICENSE_MANAGER_VERSION: 'master' + +LICENSE_MANAGER_GUNICORN_EXTRA: '' + +LICENSE_MANAGER_EXTRA_APPS: [] + +LICENSE_MANAGER_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +LICENSE_MANAGER_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +# Used to automatically configure OAuth2 Client +LICENSE_MANAGER_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'license_manager-sso-key' +LICENSE_MANAGER_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'license_manager-sso-secret' +LICENSE_MANAGER_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'license_manager-backend-service-key' +LICENSE_MANAGER_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'license_manager-backend-service-secret' +LICENSE_MANAGER_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +# API key for segment.io +LICENSE_MANAGER_SEGMENT_KEY: !!null + +LICENSE_MANAGER_DISCOVERY_BASE_URL: !!null +LICENSE_MANAGER_LMS_BASE_URL: !!null + +license_manager_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ LICENSE_MANAGER_CERTIFICATE_LANGUAGES }}' + LICENSE_MANAGER_SERVICE_USER: '{{ LICENSE_MANAGER_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ LICENSE_MANAGER_LANGUAGE_COOKIE_NAME }}' + SEGMENT_KEY: "{{ LICENSE_MANAGER_SEGMENT_KEY }}" + DISCOVERY_BASE_URL: "{{ LICENSE_MANAGER_DISCOVERY_BASE_URL }}" + LMS_BASE_URL: "{{ LICENSE_MANAGER_LMS_BASE_URL }}" + CORS_ORIGIN_WHITELIST: "{{ LICENSE_MANAGER_CORS_ORIGIN_WHITELIST }}" + CSRF_TRUSTED_ORIGINS: "{{ LICENSE_MANAGER_CSRF_TRUSTED_ORIGINS }}" + CSRF_COOKIE_SECURE: "{{ LICENSE_MANAGER_CSRF_COOKIE_SECURE }}" + CELERY_ALWAYS_EAGER: '{{ LICENSE_MANAGER_CELERY_ALWAYS_EAGER }}' + CELERY_BROKER_TRANSPORT: '{{ LICENSE_MANAGER_CELERY_BROKER_TRANSPORT }}' + CELERY_BROKER_USER: '{{ LICENSE_MANAGER_CELERY_BROKER_USER }}' + CELERY_BROKER_PASSWORD: '{{ LICENSE_MANAGER_CELERY_BROKER_PASSWORD }}' + CELERY_BROKER_HOSTNAME: '{{ LICENSE_MANAGER_CELERY_BROKER_HOSTNAME }}' + CELERY_BROKER_VHOST: '{{ LICENSE_MANAGER_CELERY_BROKER_VHOST }}' + CELERY_DEFAULT_EXCHANGE: 'license_manager' + CELERY_DEFAULT_ROUTING_KEY: 'license_manager' + CELERY_DEFAULT_QUEUE: '{{ license_manager_celery_default_queue }}' + +# See edx_django_service_automated_users for an example of what this should be +LICENSE_MANAGER_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. 
for sandboxes) + +LICENSE_MANAGER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +LICENSE_MANAGER_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +LICENSE_MANAGER_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +LICENSE_MANAGER_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# Worker settings +worker_django_settings_module: "{{ LICENSE_MANAGER_DJANGO_SETTINGS_MODULE }}" +LICENSE_MANAGER_CELERY_WORKERS: + - queue: '{{ license_manager_celery_default_queue }}' + concurrency: 1 + monitor: True +license_manager_workers: "{{ LICENSE_MANAGER_CELERY_WORKERS }}" + +license_manager_post_migrate_commands: [] diff --git a/playbooks/roles/license_manager/meta/main.yml b/playbooks/roles/license_manager/meta/main.yml new file mode 100644 index 00000000000..dd95652ffa1 --- /dev/null +++ b/playbooks/roles/license_manager/meta/main.yml @@ -0,0 +1,56 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role license_manager +# +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ LICENSE_MANAGER_USE_PYTHON38 }}' + edx_django_service_version: '{{ LICENSE_MANAGER_VERSION }}' + edx_django_service_name: '{{ license_manager_service_name }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ license_manager_service_name }}' + edx_django_service_user: '{{ license_manager_user }}' + edx_django_service_config_overrides: '{{ license_manager_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ license_manager_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ license_manager_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ LICENSE_MANAGER_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ license_manager_environment }}' + edx_django_service_gunicorn_extra: '{{ LICENSE_MANAGER_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ LICENSE_MANAGER_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ LICENSE_MANAGER_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ LICENSE_MANAGER_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ LICENSE_MANAGER_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ LICENSE_MANAGER_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ LICENSE_MANAGER_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ LICENSE_MANAGER_MEMCACHE }}' + edx_django_service_default_db_host: '{{ LICENSE_MANAGER_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ LICENSE_MANAGER_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ LICENSE_MANAGER_MYSQL_USER }}' + edx_django_service_db_password: '{{ LICENSE_MANAGER_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ LICENSE_MANAGER_MYSQL_CONN_MAX_AGE }}' + edx_django_service_extra_apps: '{{ LICENSE_MANAGER_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ LICENSE_MANAGER_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ LICENSE_MANAGER_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ LICENSE_MANAGER_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ 
LICENSE_MANAGER_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ LICENSE_MANAGER_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ LICENSE_MANAGER_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ LICENSE_MANAGER_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ license_manager_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ LICENSE_MANAGER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_api_root: '{{ LICENSE_MANAGER_API_ROOT }}' + edx_django_service_decrypt_config_enabled: '{{ LICENSE_MANAGER_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ LICENSE_MANAGER_COPY_CONFIG_ENABLED }}' + edx_django_service_migration_check_services: '{{ license_manager_service_name }},{{ license_manager_service_name }}-workers' + edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ license_manager_workers }}' + edx_django_service_repos: '{{ LICENSE_MANAGER_REPOS }}' + edx_django_service_hostname: '~^((stage|prod)-)?{{ license_manager_hostname }}.*' diff --git a/playbooks/roles/license_manager/tasks/main.yml b/playbooks/roles/license_manager/tasks/main.yml new file mode 100644 index 00000000000..81c41249564 --- /dev/null +++ b/playbooks/roles/license_manager/tasks/main.yml @@ -0,0 +1,23 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role license_manager +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# + diff --git a/playbooks/roles/local_dev/app_bashrc.j2 b/playbooks/roles/local_dev/app_bashrc.j2 deleted file mode 100644 index d1678a1d545..00000000000 --- a/playbooks/roles/local_dev/app_bashrc.j2 +++ /dev/null @@ -1,18 +0,0 @@ -#! 
/usr/bin/env bash - -# {{ ansible_managed }} - -source "{{ item.home }}/{{ item.env }}" - -# If X11 forwarding is enabled, then use the DISPLAY value -# already set and use the X11 session cookie -if [ -n "$DISPLAY" ]; then - export XAUTHORITY="{{ localdev_xauthority }}" - -# Otherwise, configure the display to use the virtual frame buffer -else - export DISPLAY="{{ localdev_xvfb_display }}" -fi - -# Default to the code repository -cd "{{ item.home }}/{{ item.repo }}" diff --git a/playbooks/roles/local_dev/defaults/main.yml b/playbooks/roles/local_dev/defaults/main.yml index a172fa58840..6c61f1b2f67 100644 --- a/playbooks/roles/local_dev/defaults/main.yml +++ b/playbooks/roles/local_dev/defaults/main.yml @@ -6,15 +6,86 @@ localdev_xauthority: "{{ localdev_home }}/.Xauthority" localdev_xvfb_display: ":1" localdev_accounts: - - { user: "{{ edxapp_user}}", home: "{{ edxapp_app_dir }}", - env: "edxapp_env", repo: "edx-platform" } + - { + user: "{{ edxapp_user|default('None') }}", + home: "{{ edxapp_app_dir|default('None') }}", + env: "edxapp_env", + repo: "edx-platform" + } - - { user: "{{ forum_user }}",home: "{{ forum_app_dir }}", - env: "forum_env", repo: "cs_comments_service" } + - { + user: "{{ forum_user|default('None') }}", + home: "{{ forum_app_dir|default('None') }}", + env: "forum_env", + repo: "cs_comments_service" + } - - { user: "{{ ora_user }}", home: "{{ ora_app_dir }}", - env: "ora_env", repo: "ora" } + - { + user: "{{ ecommerce_user|default('None') }}", + home: "{{ ecommerce_home|default('None') }}", + env: "ecommerce_env", + repo: "ecommerce" + } + - { + user: "{{ ecommerce_worker_user|default('None') }}", + home: "{{ ecommerce_worker_home|default('None') }}", + env: "ecommerce_worker_env", + repo: "ecommerce_worker" + } + + - { + user: "{{ analytics_api_user|default('None') }}", + home: "{{ analytics_api_home|default('None') }}", + env: "analytics_api_env", + repo: "analytics_api" + } + + - { + user: "{{ insights_user|default('None') }}", + home: "{{ insights_home|default('None') }}", + env: "insights_env", + repo: "edx_analytics_dashboard" + } + + - { + user: "{{ credentials_user|default('None') }}", + home: "{{ credentials_home|default('None') }}", + env: "credentials_env", + repo: "credentials" + } + + - { + user: "{{ discovery_user|default('None') }}", + home: "{{ discovery_home|default('None') }}", + env: "discovery_env", + repo: "discovery" + } + + - { + user: "{{ registrar_user|default('None') }}", + home: "{{ registrar_home|default('None') }}", + env: "registrar_env", + repo: "registrar" + } + - { + user: "{{ learner_portal_user|default('None') }}", + home: "{{ learner_portal_home|default('None') }}", + env: "learner_portal_env", + repo: "learner_portal" + } + - { + user: "{{ program_console_user|default('None') }}", + home: "{{ program_console_home|default('None') }}", + env: "program_console_env", + repo: "program_console" + } + - { + user: "{{ prospectus_user|default('None') }}", + home: "{{ prospectus_home|default('None') }}", + env: "prospectus_env", + repo: "prospectus" + } # Helpful system packages for local dev local_dev_pkgs: @@ -22,3 +93,6 @@ local_dev_pkgs: - emacs - xorg - openbox + - libffi-dev + +LOCALDEV_JSCOVER_VERSION: "1.0.2" diff --git a/playbooks/roles/local_dev/files/ftplugin-python.vim b/playbooks/roles/local_dev/files/ftplugin-python.vim new file mode 100644 index 00000000000..b625ec23b42 --- /dev/null +++ b/playbooks/roles/local_dev/files/ftplugin-python.vim @@ -0,0 +1,13 @@ +" Python specific syntax handling + +" indent according to pep-8 
rules (4 char, all spaces) +setlocal tabstop=8 +setlocal expandtab +setlocal shiftwidth=4 +setlocal softtabstop=4 +setlocal smarttab +setlocal smartindent +setlocal cinwords=if,elif,else,for,while,with,try,except,finally,def,class + +" Don't auto-align block comments to column 1 +inoremap # X# diff --git a/playbooks/roles/local_dev/files/vimrc b/playbooks/roles/local_dev/files/vimrc new file mode 100644 index 00000000000..6b330c8637e --- /dev/null +++ b/playbooks/roles/local_dev/files/vimrc @@ -0,0 +1,33 @@ +set nocompatible + +" Turn on syntax highlighting +syntax on + +" Handle filetypes +filetype on + +" Handle filetype-based plugins and indentation rules +filetype plugin indent on + +" Highlight matching delimiters (brackets, braces, quotes, etc.) +set showmatch + +" Show the following replacement characters on :set list +set listchars=tab:→\ ,trail:·,nbsp:¤,precedes:«,extends:»,eol:↲ + +" Never autocomplete filenames that match the following +set wildignore+=*.pyc,*.pyo + +" Silence error bells +set noerrorbells visualbell + +" Standard indentation rules: + +" - A tab character is 8 spaces wide +set tabstop=8 +" - a block indents four spaces +set shiftwidth=4 +" - Pressing the tab key indents by four spaces +set softtabstop=4 +" - Always render indentation as spaces +set expandtab diff --git a/playbooks/roles/local_dev/meta/main.yml b/playbooks/roles/local_dev/meta/main.yml new file mode 100644 index 00000000000..8017dffdac3 --- /dev/null +++ b/playbooks/roles/local_dev/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - role: jscover + JSCOVER_VERSION: "{{ LOCALDEV_JSCOVER_VERSION }}" diff --git a/playbooks/roles/local_dev/tasks/main.yml b/playbooks/roles/local_dev/tasks/main.yml index 57d64e9ef2c..4ee37052fbb 100644 --- a/playbooks/roles/local_dev/tasks/main.yml +++ b/playbooks/roles/local_dev/tasks/main.yml @@ -1,55 +1,122 @@ --- -- name: install useful system packages +- name: Install useful system packages apt: - pkg={{','.join(local_dev_pkgs)}} install_recommends=yes - state=present update_cache=yes + name: "{{ item }}" + install_recommends: yes + state: present + update_cache: yes + with_items: "{{ local_dev_pkgs }}" -- name: set login shell for app accounts - user: name={{ item.user }} shell="/bin/bash" - with_items: localdev_accounts +- name: Set login shell for app accounts + user: + name: "{{ item.user }}" + shell: "/bin/bash" + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' -# Ensure forum user has permissions to access .gem and .rbenv -# This is a little twisty: the forum role sets the owner and group to www-data -# So we add the forum user to the www-data group and give group write permissions -- name: add forum user to www-data group - user: name={{ forum_user }} groups={{ common_web_group }} append=yes - -- name: set forum rbenv and gem permissions +# The user that runs the app needs read/write permissions +# to the directories under var to be able to import and create +# new courses. 
+- name: Update permissions for edxapp data dir file: - path={{ item }} state=directory mode=770 - with_items: - - "{{ forum_app_dir }}/.gem" - - "{{ forum_app_dir }}/.rbenv" + path: "{{ edxapp_data_dir }}" + state: "directory" + recurse: yes + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + when: edxapp_user is defined # Create scripts to configure environment -- name: create login scripts +- name: Create login scripts template: - src=app_bashrc.j2 dest={{ item.home }}/.bashrc - owner={{ item.user }} mode=755 - with_items: localdev_accounts + src: app_bashrc.j2 + dest: "{{ item.home }}/.bashrc" + owner: "{{ item.user }}" + mode: "0755" + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' + ignore_errors: yes # Default to the correct git config # No more accidentally force pushing to master! :) -- name: configure git +- name: Configure git copy: - src=gitconfig dest={{ item.home }}/.gitconfig - owner={{ item.user }} mode=700 - with_items: localdev_accounts + src: gitconfig + dest: "{{ item.home }}/.gitconfig" + owner: "{{ item.user }}" + mode: "0700" + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' + ignore_errors: yes # Configure X11 for application users -- name: preserve DISPLAY for sudo +- name: Preserve DISPLAY for sudo copy: - src=x11_display dest=/etc/sudoers.d/x11_display - owner=root group=root mode=0440 + src: x11_display + dest: /etc/sudoers.d/x11_display + owner: root + group: root + mode: "0440" -- name: login share X11 auth to app users +- name: Login share X11 auth to app users template: - src=share_x11.j2 dest={{ localdev_home }}/share_x11 - owner={{ localdev_user }} mode=0700 + src: share_x11.j2 + dest: "{{ localdev_home }}/share_x11" + owner: "{{ localdev_user }}" + mode: "0700" + +- name: Update bashrc with X11 share script + lineinfile: + dest: "{{ localdev_home }}/.bashrc" + regexp: ". {{ localdev_home }}/share_x11" + line: ". {{ localdev_home }}/share_x11" + state: present + +# Create a .bashrc.d directory to hold extra bash initializations +- name: Create .bashrc.d dir + file: + path: "{{ item.home }}/.bashrc.d" + owner: "{{ item.user }}" + group: "{{ common_web_group }}" + state: directory + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' + +# Add useful vimrc files +- name: Create .vim/plugin directory + file: + path: "{{ item.home }}/.vim/ftplugin" + owner: "{{ item.user }}" + group: "{{ common_web_group }}" + state: directory + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' + +- name: Add .vimrc file + copy: + src: vimrc + dest: "{{ item.home }}/.vimrc" + owner: "{{ item.user }}" + group: "{{ common_web_group }}" + mode: "0644" + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' + +- name: Add python.vim ftplugin file + copy: + src: ftplugin-python.vim + dest: "{{ item.home }}/.vim/ftplugin/python.vim" + owner: "{{ item.user }}" + group: "{{ common_web_group }}" + mode: "0644" + with_items: "{{ localdev_accounts }}" + when: item.user != 'None' -- name: update bashrc with X11 share script +# Edit the /etc/hosts file so that the Preview button will work in Studio +- name: Add preview.localhost to /etc/hosts lineinfile: - dest={{ localdev_home }}/.bashrc - regexp=". {{ localdev_home }}/share_x11" - line=". 
{{ localdev_home }}/share_x11" - state=present + dest: /etc/hosts + regexp: "^127.0.0.1" + line: "127.0.0.1 localhost preview.localhost" + state: present + become: yes diff --git a/playbooks/roles/local_dev/templates/app_bashrc.j2 b/playbooks/roles/local_dev/templates/app_bashrc.j2 index df2bcc35d4f..31dfce59dd8 100644 --- a/playbooks/roles/local_dev/templates/app_bashrc.j2 +++ b/playbooks/roles/local_dev/templates/app_bashrc.j2 @@ -2,6 +2,20 @@ # {{ ansible_managed }} +# enable color support of ls and also add handy aliases +if [ -x /usr/bin/dircolors ]; then + eval "$(dircolors -b)" + alias ls='ls --color=auto' + alias grep='grep --color=auto' + alias fgrep='fgrep --color=auto' + alias egrep='egrep --color=auto' +fi + +# some more ls aliases +alias ll='ls -Al' +alias la='ls -A' + + source "{{ item.home }}/{{ item.env }}" # If X11 forwarding is enabled, then use the DISPLAY value @@ -14,4 +28,13 @@ else export DISPLAY="{{ localdev_xvfb_display }}" fi +# Import ~/.bashrc.d modules +if [ -d {{ item.home }}/.bashrc.d ]; then + for BASHMODULE in {{ item.home }}/.bashrc.d/*; do + source $BASHMODULE + done +fi + cd "{{ item.home }}/{{ item.repo }}" + +export JSCOVER_JAR="/usr/local/bin/JSCover-all-{{ LOCALDEV_JSCOVER_VERSION }}.jar" diff --git a/playbooks/roles/logstash/defaults/main.yml b/playbooks/roles/logstash/defaults/main.yml new file mode 100644 index 00000000000..adaac25245c --- /dev/null +++ b/playbooks/roles/logstash/defaults/main.yml @@ -0,0 +1,18 @@ +--- +LOGSTASH_DAYS_TO_KEEP: 30 +LOGSTASH_ROTATE: true +logstash_app_dir: /edx/app/logstash +logstash_log_dir: /edx/var/log/logstash +logstash_data_dir: /edx/var/logstash/file_logs +logstash_syslog_port: 514 +logstash_file: logstash-1.3.3-flatjar.jar +logstash_url: "https://download.elasticsearch.org/logstash/logstash/%7B%7B%20logstash_file%20%7D%7D" +logstash_python_requirements: + - pyes==0.19.0 +logstash_scripts_repo: https://github.com/crashdump/logstash-elasticsearch-scripts +logstash_rotate_cron: + hour: 5 + minute: 42 +logstash_optimize_cron: + hour: 6 + minute: 15 diff --git a/playbooks/roles/logstash/files/template_logstash.json b/playbooks/roles/logstash/files/template_logstash.json new file mode 100644 index 00000000000..064afa69c3d --- /dev/null +++ b/playbooks/roles/logstash/files/template_logstash.json @@ -0,0 +1,36 @@ +{ + "template": "logstash-*", + "settings" : { + "number_of_shards" : 1, + "number_of_replicas" : 0, + "index" : { + "query" : { "default_field" : "message" }, + "store" : { "compress" : { "stored" : true, "tv": true } } + } + }, + "mappings": { + "_default_": { + "_all": { "enabled": false }, + "_source": { "compress": true }, + "dynamic_templates": [ + { + "string_template" : { + "match" : "*", + "mapping": { "type": "string", "index": "not_analyzed" }, + "match_mapping_type" : "string" + } + } + ], + "properties" : { + "@fields": { "type": "object", "dynamic": true, "path": "full" }, + "@message" : { "type" : "string", "index" : "analyzed" }, + "@source" : { "type" : "string", "index" : "not_analyzed" }, + "@source_host" : { "type" : "string", "index" : "not_analyzed" }, + "@source_path" : { "type" : "string", "index" : "not_analyzed" }, + "@tags": { "type": "string", "index" : "not_analyzed" }, + "@timestamp" : { "type" : "date", "index" : "not_analyzed" }, + "@type" : { "type" : "string", "index" : "not_analyzed" } + } + } + } +} diff --git a/playbooks/roles/logstash/handlers/main.yml b/playbooks/roles/logstash/handlers/main.yml new file mode 100644 index 00000000000..7a5cc53757b --- /dev/null 
+++ b/playbooks/roles/logstash/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: restart logstash + service: name=logstash state=restarted + diff --git a/playbooks/roles/logstash/meta/default.yml b/playbooks/roles/logstash/meta/default.yml new file mode 100644 index 00000000000..2e3c003aa24 --- /dev/null +++ b/playbooks/roles/logstash/meta/default.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - elasticsearch diff --git a/playbooks/roles/logstash/tasks/main.yml b/playbooks/roles/logstash/tasks/main.yml new file mode 100644 index 00000000000..edcadd63237 --- /dev/null +++ b/playbooks/roles/logstash/tasks/main.yml @@ -0,0 +1,74 @@ +# requires: +# - oraclejdk +# - elasticsearch +--- + +- name: Ensure app apt dependencies are installed + apt: pkg={{ item }} state=present + with_items: + - redis-server + +- name: Ensure {{ logstash_app_dir }} exists + file: path={{ logstash_app_dir }} state=directory owner=root group=root mode=0755 + +- name: Ensure subdirectories exist + file: path={{ logstash_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory + with_items: + - bin + - etc + - share + +- name: Ensure logstash config is in place + template: src=logstash.conf.j2 dest={{ logstash_app_dir }}/etc/logstash.conf owner=root group=root mode=0644 + notify: restart logstash + +- name: Ensure logstash upstart job is in place + template: src=logstash.upstart.conf.j2 dest=/etc/init/logstash.conf owner=root group=root mode=0755 + +- name: Ensure logstash has a logging dir at {{ logstash_log_dir }} + file: path={{ logstash_log_dir }} owner=root group=root mode=0755 state=directory + +- name: Ensure we have the specified logstash release + get_url: url={{ logstash_url }} dest={{ logstash_app_dir }}/share/{{ logstash_file }} + +- name: Ensure symlink with no version exists at {{ logstash_app_dir }}/share/logstash.jar + file: src={{ logstash_app_dir }}/share/{{ logstash_file }} dest={{ logstash_app_dir }}/share/logstash.jar state=link + +- name: Start logstash + service: name=logstash state=started enabled=yes + +- name: Ensure we are running + wait_for: port={{ logstash_syslog_port }} host=localhost timeout=60 + +- name: Copy logstash es index template + copy: src=template_logstash.json dest=/etc/elasticsearch/template_logstash.json + +- name: Enable logstash es index template + shell: chdir=/etc/elasticsearch executable=/bin/bash curl -XPUT 'http://localhost:9200/_template/template_logstash' -d @template_logstash.json + + +- name: Install python requirements + pip: name={{ item }} state=present + with_items: "{{ logstash_python_requirements }}" + +- name: Checkout logstash rotation scripts + git: repo={{ logstash_scripts_repo }} dest={{ logstash_app_dir }}/share/logstash-elasticsearch-scripts + when: LOGSTASH_ROTATE|bool + +- name: Setup cron to run rotation + cron: + user: root + name: "Elasticsearch logstash index rotation" + hour: "{{ logstash_rotate_cron.hour }}" + minute: "{{ logstash_rotate_cron.minute }}" + job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron" + when: LOGSTASH_ROTATE|bool + +- name: Setup cron to run optimization + cron: + user: root + name: "Elasticsearch logstash index optimization" + hour: "{{ logstash_optimize_cron.hour }}" + minute: "{{ logstash_optimize_cron.minute }}" + job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{
logstash_log_dir }}/optimize_cron" + when: LOGSTASH_ROTATE|bool diff --git a/playbooks/roles/logstash/templates/logstash.conf.j2 b/playbooks/roles/logstash/templates/logstash.conf.j2 new file mode 100644 index 00000000000..5a2d819fd69 --- /dev/null +++ b/playbooks/roles/logstash/templates/logstash.conf.j2 @@ -0,0 +1,54 @@ +input { + tcp { + port => {{ logstash_syslog_port }} + type => syslog + } + udp { + port => {{ logstash_syslog_port }} + type => syslog + } +} + +filter { + if [type] == "syslog" { + grok { + match => { "message" => "<%{POSINT:syslog_pri}>%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:syslog_message}" } + add_field => [ "received_at", "%{@timestamp}" ] + add_field => [ "received_from", "%{@source_host}" ] + } + syslog_pri { } + date { + match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] + } + # Try and parse the tracking log json + # 142 is syslog facility 17 (local1) and Informational. + # This is used to reduce the number of errors in json parsing as + # tracking uses that facility and priority by default. + if "142" in [syslog_pri] { + json { + source => "syslog_message" + target => "tracking" + } + } + if !("_grokparsefailure" in [tags]) { + mutate { + replace => [ "@source_host", "%{syslog_hostname}" ] + replace => [ "@message", "%{syslog_message}" ] + } + } + mutate { + remove_field => [ "syslog_hostname", "syslog_message", "syslog_timestamp" ] + } + } +} + +output { + # Example just to output to elasticsearch + elasticsearch { } + # And gzip for each host and program + file { + path => '{{ logstash_data_dir }}/%{@source_host}/all.%{+yyyyMMdd}.gz' + gzip => true + } + # Should add option for S3 as well. +} diff --git a/playbooks/roles/logstash/templates/logstash.upstart.conf.j2 b/playbooks/roles/logstash/templates/logstash.upstart.conf.j2 new file mode 100644 index 00000000000..7727665e2a2 --- /dev/null +++ b/playbooks/roles/logstash/templates/logstash.upstart.conf.j2 @@ -0,0 +1,26 @@ +# logstash-indexer.conf# logstash - indexer instance +# + +description "logstash indexer instance" + +start on virtual-filesystems +stop on runlevel [06] + +respawn +respawn limit 5 30 +limit nofile 65550 65550 + +env HOME={{ logstash_app_dir }} +env JAVA_OPTS='-Xms512m -Xmx512m' +env PATH=$PATH:/usr/lib/jvm/{{ oraclejdk_base }}/bin + +chdir {{ logstash_app_dir }} +setuid root +console log + +# for versions 1.1.1 - 1.1.4 the internal web service crashes when touched +# and the current workaround is to just not run it and run Kibana instead + +script + exec java -jar {{ logstash_app_dir }}/share/logstash.jar agent -f {{ logstash_app_dir }}/etc/logstash.conf --log {{ logstash_log_dir }}/logstash-indexer.out +end script diff --git a/playbooks/roles/mariadb/defaults/main.yml b/playbooks/roles/mariadb/defaults/main.yml new file mode 100644 index 00000000000..4c002a0075d --- /dev/null +++ b/playbooks/roles/mariadb/defaults/main.yml @@ -0,0 +1,161 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role mariadb +# +MARIADB_APT_KEY_XENIAL_ID: '0xF1656F24C74CD1D8' +MARIADB_APT_KEY_ID: '0xcbcb082a1bb943db' + +# Note: version is determined by repo +MARIADB_REPO: "deb http://mirrors.syringanetworks.net/mariadb/repo/10.0/ubuntu {{ ansible_distribution_release }} 
main" + +MARIADB_CREATE_DBS: yes +MARIADB_CLUSTERED: no +MARIADB_CLUSTER_USER_ADMIN: "mariadb_clu_root" +MARIADB_CLUSTER_PASSWORD_ADMIN: "password" +MARIADB_HOST_PRIV: '%' + +MARIADB_HAPROXY_USER: 'haproxy' +MARIADB_HAPROXY_HOSTS: + - '192.168.33.100' + - '192.168.33.110' + - '192.168.33.120' + +MARIADB_LISTEN_ALL: false + +MARIADB_DATABASES: + - { + db: "{{ ECOMMERCE_DATABASE_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ XQUEUE_MYSQL_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ EDXAPP_MYSQL_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ EDXAPP_MYSQL_CSMH_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ EDX_NOTES_API_MYSQL_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ PROGRAMS_DEFAULT_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ ANALYTICS_API_DEFAULT_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ ANALYTICS_API_REPORTS_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ DISCOVERY_DEFAULT_DB_NAME | default(None) }}", + encoding: "utf8" + } + - { + db: "{{ HIVE_METASTORE_DATABASE_NAME | default(None) }}", + encoding: "latin1" + } + - { + db: "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}", + encoding: "utf8" + } + +MARIADB_USERS: + - { + db: "{{ ECOMMERCE_DATABASE_NAME | default(None) }}", + user: "{{ ECOMMERCE_DATABASE_USER | default(None) }}", + pass: "{{ ECOMMERCE_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}", + user: "{{ INSIGHTS_MYSQL_USER | default(None) }}", + pass: "{{ INSIGHTS_MYSQL_USER | default(None) }}" + } + - { + db: "{{ XQUEUE_MYSQL_DB_NAME | default(None) }}", + user: "{{ XQUEUE_MYSQL_USER | default(None) }}", + pass: "{{ XQUEUE_MYSQL_PASSWORD | default(None) }}" + + } + - { + db: "{{ EDXAPP_MYSQL_DB_NAME | default(None) }}", + user: "{{ EDXAPP_MYSQL_USER | default(None) }}", + pass: "{{ EDXAPP_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ EDXAPP_MYSQL_CSMH_DB_NAME | default(None) }}", + user: "{{ EDXAPP_MYSQL_CSMH_USER | default(None) }}", + pass: "{{ EDXAPP_MYSQL_CSMH_PASSWORD | default(None) }}" + } + - { + db: "{{ PROGRAMS_DEFAULT_DB_NAME | default(None) }}", + user: "{{ PROGRAMS_DATABASE_USER | default(None) }}", + pass: "{{ PROGRAMS_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME | default(None) }}", + user: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER | default(None) }}", + pass: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ HIVE_METASTORE_DATABASE_NAME | default(None) }}", + user: "{{ HIVE_METASTORE_DATABASE_USER | default(None) }}", + pass: "{{ HIVE_METASTORE_DATABASE_PASSWORD | default(None) }}" + } + - { + db: "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}", + user: "{{ CREDENTIALS_MYSQL_USER | default(None) }}", + pass: "{{ CREDENTIALS_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ DISCOVERY_DEFAULT_DB_NAME | default(None) }}", + user: "{{ DISCOVERY_MYSQL_USER | default(None) }}", + pass: "{{ DISCOVERY_MYSQL_PASSWORD | default(None) }}" + } + - { + db: "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}", + user: "{{ BLOCKSTORE_DATABASE_USER | default(None) }}", + pass: "{{ BLOCKSTORE_DATABASE_PASSWORD | default(None) }}" + } + +# +# OS packages +# +mariadb_debian_pkgs: 
+ - python-software-properties + - python-mysqldb + +mariadb_redhat_pkgs: [] + +mariadb_apt_repository: + +mariadb_solo_packages: + - mariadb-server + +mariadb_cluster_packages: + - mariadb-galera-server-10.0 + - galera-3 diff --git a/playbooks/roles/mariadb/meta/main.yml b/playbooks/roles/mariadb/meta/main.yml new file mode 100644 index 00000000000..a1e245e7f36 --- /dev/null +++ b/playbooks/roles/mariadb/meta/main.yml @@ -0,0 +1,22 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role mariadb +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } +dependencies: + - common diff --git a/playbooks/roles/mariadb/tasks/cluster.yml b/playbooks/roles/mariadb/tasks/cluster.yml new file mode 100644 index 00000000000..02f0f26505f --- /dev/null +++ b/playbooks/roles/mariadb/tasks/cluster.yml @@ -0,0 +1,62 @@ +- name: copy galera cluster config + template: + src: "etc/mysql/conf.d/galera.cnf.j2" + dest: "/etc/mysql/conf.d/galera.cnf" + owner: "root" + group: "root" + mode: 0600 + +- name: check if we have already bootstrapped the cluster + stat: path=/etc/mysql/ansible_cluster_started + register: mariadb_bootstrap + +- name: stop mysql for cluster bootstrap + service: name=mysql state=stopped + when: not mariadb_bootstrap.stat.exists + +- name: setup bootstrap on primary + lineinfile: + dest: "/etc/mysql/conf.d/galera.cnf" + regexp: "^wsrep_cluster_address=gcomm://{{ groups[group_names[0]]|sort|join(',') }}$" + line: "wsrep_cluster_address=gcomm://" + when: inventory_hostname == hostvars[groups[group_names[0]][0]].inventory_hostname and not mariadb_bootstrap.stat.exists + +- name: fetch debian.cnf file so start-stop will work properly + fetch: + src: "/etc/mysql/debian.cnf" + dest: "/tmp/debian.cnf" + fail_on_missing: yes + flat: yes + when: inventory_hostname == hostvars[groups[group_names[0]][0]].inventory_hostname and not mariadb_bootstrap.stat.exists + register: mariadb_new_debian_cnf + +- name: copy fetched file to other cluster members + copy: src=/tmp/debian.cnf dest=/etc/mysql/debian.cnf + when: not (mariadb_new_debian_cnf is skipped) + +- name: start everything + service: name=mysql state=started + when: not mariadb_bootstrap.stat.exists + +- name: reset galera cluster config since we are bootstrapped + template: + src: "etc/mysql/conf.d/galera.cnf.j2" + dest: "/etc/mysql/conf.d/galera.cnf" + owner: "root" + group: "root" + mode: 0600 + when: not mariadb_bootstrap.stat.exists + +- name: touch bootstrap file to confirm we are fully up + file: path="/etc/mysql/ansible_cluster_started" state=touch + +# This is needed for mysql-check in haproxy or other mysql monitor +# scripts to prevent haproxy checks exceeding `max_connect_errors`. 
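+# A matching haproxy backend (managed outside this role) might look something +# like the following sketch; backend name, server names and IPs are illustrative: +# +# backend mariadb-cluster +# option mysql-check user haproxy +# server db1 192.168.33.100:3306 check +# server db2 192.168.33.110:3306 check +#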
+- name: create haproxy monitor user + mysql_user: + name: "{{ MARIADB_HAPROXY_USER }}" + host: "{{ item }}" + password: "" + priv: "*.*:USAGE,RELOAD" + state: present + with_items: "{{ MARIADB_HAPROXY_HOSTS }}" diff --git a/playbooks/roles/mariadb/tasks/main.yml b/playbooks/roles/mariadb/tasks/main.yml new file mode 100644 index 00000000000..75b6663412e --- /dev/null +++ b/playbooks/roles/mariadb/tasks/main.yml @@ -0,0 +1,131 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role mariadb +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: Install pre-req debian packages + apt: name={{ item }} state=present + with_items: "{{ mariadb_debian_pkgs }}" + +- name: Add mariadb apt key + apt_key: + url: "{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}" + when: ansible_distribution_release != 'xenial' + +- name: Add Xenial mariadb apt key + apt_key: + url: "{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_XENIAL_ID }}" + when: ansible_distribution_release == 'xenial' + +- name: add the mariadb repo to the sources list + apt_repository: + repo: "{{ MARIADB_REPO }}" + state: present + +- name: install mariadb solo packages + apt: name={{ item }} update_cache=yes + with_items: "{{ mariadb_solo_packages }}" + when: not MARIADB_CLUSTERED|bool + +- name: install mariadb cluster packages + apt: name={{ item }} update_cache=yes + with_items: "{{ mariadb_cluster_packages }}" + when: MARIADB_CLUSTERED|bool + +- name: remove bind-address + lineinfile: + dest: /etc/mysql/my.cnf + regexp: '^bind-address\s+=\s+127\.0\.0\.1$' + state: absent + when: MARIADB_LISTEN_ALL|bool or MARIADB_CLUSTERED|bool + +- include: cluster.yml + when: MARIADB_CLUSTERED|bool + +- name: start everything + service: name=mysql state=started + +- name: create databases + mysql_db: + db: "{{ item.db }}" + state: present + encoding: "{{ item.encoding }}" + when: item.db != None and item.db != '' and MARIADB_CREATE_DBS|bool + with_items: "{{ MARIADB_DATABASES }}" + +- name: create database users + mysql_user: + name: "{{ item.user }}" + password: "{{ item.pass }}" + priv: "{{ item.db }}.*:SELECT,INSERT,UPDATE,DELETE" + host: "{{ MARIADB_HOST_PRIV }}" + append_privs: yes + when: item.db != None and item.db != '' + with_items: "{{ MARIADB_USERS }}" + +- name: setup the migration db user + mysql_user: + name: "{{ COMMON_MYSQL_MIGRATE_USER }}" + password: "{{ COMMON_MYSQL_MIGRATE_PASS }}" + priv: "{{ item.db }}.*:ALL" + host: "{{ MARIADB_HOST_PRIV }}" + append_privs: yes + when: item.db != None and item.db != '' + with_items: "{{ MARIADB_DATABASES }}" + +- name: create api user for the analytics api + mysql_user: + name: "api001" + password: "{{ ANALYTICS_API_DATABASES.default.PASSWORD }}" + priv: '{{ ANALYTICS_API_DATABASES.default.NAME }}.*:SELECT,INSERT,UPDATE,DELETE/reports.*:SELECT' + host: "{{ MARIADB_HOST_PRIV }}" + when: ANALYTICS_API_SERVICE_CONFIG is defined + +- name: create read-only reports user for the analytics-api + mysql_user: + name: reports001 + password: "{{ ANALYTICS_API_DATABASES.reports.PASSWORD }}" + priv: '{{ ANALYTICS_API_DATABASES.reports.NAME }}.*:SELECT' + host: "{{ MARIADB_HOST_PRIV }}" + when: ANALYTICS_API_SERVICE_CONFIG is defined + +- name: setup the edx-notes-api db user + mysql_user: + name: "{{
EDX_NOTES_API_MYSQL_DB_USER }}" + password: "{{ EDX_NOTES_API_MYSQL_DB_PASS }}" + priv: "{{ EDX_NOTES_API_MYSQL_DB_NAME }}.*:SELECT,INSERT,UPDATE,DELETE" + host: "{{ MARIADB_HOST_PRIV }}" + when: EDX_NOTES_API_MYSQL_DB_USER is defined + +- name: setup the read-only db user + mysql_user: + name: "{{ COMMON_MYSQL_READ_ONLY_USER }}" + password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}" + priv: "*.*:SELECT" + host: "{{ MARIADB_HOST_PRIV }}" + +- name: setup the admin db user + mysql_user: + name: "{{ COMMON_MYSQL_ADMIN_USER }}" + password: "{{ COMMON_MYSQL_ADMIN_PASS }}" + priv: "*.*:CREATE USER" + host: "{{ MARIADB_HOST_PRIV }}" + diff --git a/playbooks/roles/mariadb/templates/etc/mysql/conf.d/galera.cnf.j2 b/playbooks/roles/mariadb/templates/etc/mysql/conf.d/galera.cnf.j2 new file mode 100644 index 00000000000..9a7c8b09e10 --- /dev/null +++ b/playbooks/roles/mariadb/templates/etc/mysql/conf.d/galera.cnf.j2 @@ -0,0 +1,17 @@ +{%- set hosts= [] -%} +{%- for host in hostvars.keys()|sort -%} + {% do hosts.append(host) %} +{%- endfor %} +[mysqld] +binlog_format=ROW +innodb_autoinc_lock_mode=2 +innodb_doublewrite=1 +query_cache_size=0 + +wsrep_provider=/usr/lib/galera/libgalera_smm.so +wsrep_cluster_address=gcomm://{{ hosts|join(',') }}?pc.wait_prim=no +wsrep_sst_auth={{ MARIADB_CLUSTER_USER_ADMIN }}:{{ MARIADB_CLUSTER_PASSWORD_ADMIN }} + +{% if vagrant_cluster|bool %} +wsrep_node_address={{ ansible_ssh_host }} +{% endif %} diff --git a/playbooks/roles/memcache/tasks/main.yml b/playbooks/roles/memcache/tasks/main.yml new file mode 100644 index 00000000000..04413fa6285 --- /dev/null +++ b/playbooks/roles/memcache/tasks/main.yml @@ -0,0 +1,10 @@ +# Installs memcached + +- name: Install memcached + apt: + name: memcached + state: present + update_cache: yes + tags: + - install + - install:memcache diff --git a/playbooks/roles/mfe/README.rst b/playbooks/roles/mfe/README.rst new file mode 100644 index 00000000000..fedac910b48 --- /dev/null +++ b/playbooks/roles/mfe/README.rst @@ -0,0 +1,7 @@ + +MFE base role +############# + +Base role to build MFEs. For deployments it is recommended to use the `mfe_deployer`_ role. + +.. 
_mfe_deployer: ../mfe_deployer diff --git a/playbooks/roles/mfe/defaults/main.yml b/playbooks/roles/mfe/defaults/main.yml new file mode 100644 index 00000000000..f47a6b588b6 --- /dev/null +++ b/playbooks/roles/mfe/defaults/main.yml @@ -0,0 +1,148 @@ +--- +MFE_NAME: ' NOT-SET ' +MFE_REPO: 'frontend-app-{{ MFE_NAME }}' +MFE_HOME: '{{ COMMON_APP_DIR }}/{{ MFE_NAME }}' +MFE_USER: '{{ MFE_NAME }}' + +MFE_GIT_PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' +MFE_GIT_DOMAIN: '{{ COMMON_GIT_MIRROR }}' +MFE_GIT_PATH: '{{ COMMON_GIT_PATH }}' +MFE_VERSION: 'master' +MFE_GIT_IDENTITY: null + +MFE_CODE_DIR: '{{ MFE_HOME }}/{{ MFE_REPO }}' +MFE_NODEENV_DIR: '{{ MFE_HOME }}/nodeenvs/{{ MFE_NAME }}' +MFE_NODEENV_BIN: '{{ MFE_NODEENV_DIR }}/bin' +MFE_NODE_MODULES_DIR: '{{ MFE_CODE_DIR }}/node_modules' +MFE_NODE_BIN: '{{ MFE_NODE_MODULES_DIR }}/.bin' +MFE_NODE_VERSION: '18.17.0' +MFE_NPM_VERSION: '8.1.2' + +MFE_DEBIAN_PKGS_DEFAULT: + - gettext + - nodeenv +MFE_DEBIAN_PKGS_EXTRA: [] +MFE_DEBIAN_PKGS: '{{ MFE_DEBIAN_PKGS_DEFAULT + MFE_DEBIAN_PKGS_EXTRA }}' + +MFE_CORS_ALLOWLIST: [] +MFE_ALLOW_CORS_HEADERS: false +MFE_MAX_WEBSERVER_UPLOAD: !!null +MFE_ALLOW_CORS_CREDENTIALS: false + +MFE_HOSTNAME: '~^((stage|prod)-)?{{ MFE_NAME }}.*' +MFE_NGINX_PORT: '80' +MFE_NGINX_READ_TIMEOUT: !!null + +MFE_SSL_NGINX_PORT: '443' +MFE_ENABLE_BASIC_AUTH: false + +MFE_REPOS: + - PROTOCOL: '{{ MFE_GIT_PROTOCOL }}' + DOMAIN: '{{ MFE_GIT_DOMAIN }}' + PATH: '{{ MFE_GIT_PATH }}' + REPO: '{{ MFE_REPO }}.git' + VERSION: '{{ MFE_VERSION }}' + DESTINATION: '{{ MFE_CODE_DIR }}' + SSH_KEY: '{{ MFE_GIT_IDENTITY }}' + +MFE_NODE_ENV: production +MFE_BASE: "{{ MFE_NAME }}.{{ COMMON_LMS_BASE_URL }}" +MFE_PUBLIC_PATH: "/" +MFE_BASE_SCHEMA: "http" +MFE_BASE_URL: "{{ MFE_BASE_SCHEMA }}://{{ MFE_BASE }}" +MFE_LMS_BASE_URL: "{{ COMMON_LMS_BASE_URL }}" +MFE_ECOMMERCE_BASE_URL: "{{ COMMON_ECOMMERCE_BASE_URL }}" +MFE_LOGIN_URL: "{{ MFE_LMS_BASE_URL }}/login" +MFE_LOGOUT_URL: "{{ MFE_LMS_BASE_URL }}/logout" +MFE_SITE_NAME: "" +MFE_MARKETING_SITE_BASE_URL: "{{ MFE_LMS_BASE_URL }}" +MFE_CONTACT_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}/contact" +MFE_CSRF_TOKEN_API_PATH: "/csrf/api/v1/token" +MFE_CREDENTIALS_BASE_URL: "" +MFE_REFRESH_ACCESS_TOKEN_ENDPOINT: "{{ MFE_LMS_BASE_URL }}/login_refresh" +MFE_DATA_API_BASE_URL: "{{ MFE_LMS_BASE_URL }}" +MFE_ACCESS_TOKEN_COOKIE_NAME: "{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}" +MFE_SUPPORT_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}/contact" +MFE_OPEN_SOURCE_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}/opensource" +MFE_TERMS_OF_SERVICE_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}/tos" +MFE_PRIVACY_POLICY_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}/privacy" +MFE_SEARCH_CATALOG_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}/courses" +MFE_LANGUAGE_PREFERENCE_COOKIE_NAME: "openedx-language-preference" + +MFE_LOGO_URL: "{{ COMMON_LOGO_URL }}" +MFE_LOGO_TRADEMARK_URL: "{{ COMMON_LOGO_TRADEMARK_URL }}" +MFE_LOGO_WHITE_URL: "{{ COMMON_LOGO_WHITE_URL }}" +MFE_FAVICON_URL: "{{ COMMON_FAVICON_URL }}" + +MFE_FACEBOOK_URL: "" +MFE_TWITTER_URL: "" +MFE_YOU_TUBE_URL: "" +MFE_LINKED_IN_URL: "" +MFE_REDDIT_URL: "" + +MFE_APPLE_APP_STORE_URL: "" +MFE_GOOGLE_PLAY_URL: "" + +MFE_ENTERPRISE_MARKETING_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}" +MFE_ENTERPRISE_MARKETING_UTM_SOURCE: "" +MFE_ENTERPRISE_MARKETING_UTM_CAMPAIGN: "" +MFE_ENTERPRISE_MARKETING_FOOTER_UTM_MEDIUM: "" + +MFE_ORDER_HISTORY_URL: "" +MFE_USER_INFO_COOKIE_NAME: edx-user-info + +MFE_NEW_RELIC_APP_ID: 'fake_app' +MFE_NEW_RELIC_LICENSE_KEY: 'fake_license' + +MFE_ENVIRONMENT_DEFAULT: + PATH: '{{ 
MFE_NODEENV_BIN }}:{{ ansible_env.PATH }}' + NODE_ENV: "{{ MFE_NODE_ENV }}" + BASE_URL: "{{ MFE_BASE_URL }}" + PUBLIC_PATH: "{{ MFE_PUBLIC_PATH }}" + LMS_BASE_URL: "{{ MFE_LMS_BASE_URL }}" + LOGIN_URL: "{{ MFE_LOGIN_URL }}" + LOGOUT_URL: "{{ MFE_LOGOUT_URL }}" + SITE_NAME: "{{ MFE_SITE_NAME }}" + MARKETING_SITE_BASE_URL: "{{ MFE_MARKETING_SITE_BASE_URL }}" + CONTACT_URL: "{{ MFE_CONTACT_URL }}" + CSRF_TOKEN_API_PATH: "{{ MFE_CSRF_TOKEN_API_PATH }}" + CREDENTIALS_BASE_URL: "{{ MFE_CREDENTIALS_BASE_URL }}" + REFRESH_ACCESS_TOKEN_ENDPOINT: "{{ MFE_REFRESH_ACCESS_TOKEN_ENDPOINT }}" + DATA_API_BASE_URL: "{{ MFE_DATA_API_BASE_URL }}" + ACCESS_TOKEN_COOKIE_NAME: "{{ MFE_ACCESS_TOKEN_COOKIE_NAME }}" + SUPPORT_URL: "{{ MFE_SUPPORT_URL }}" + OPEN_SOURCE_URL: "{{ MFE_OPEN_SOURCE_URL }}" + TERMS_OF_SERVICE_URL: "{{ MFE_TERMS_OF_SERVICE_URL }}" + PRIVACY_POLICY_URL: "{{ MFE_PRIVACY_POLICY_URL }}" + SEARCH_CATALOG_URL: "{{ MFE_SEARCH_CATALOG_URL }}" + FACEBOOK_URL: "{{ MFE_FACEBOOK_URL }}" + TWITTER_URL: "{{ MFE_TWITTER_URL }}" + YOU_TUBE_URL: "{{ MFE_YOU_TUBE_URL }}" + LINKED_IN_URL: "{{ MFE_LINKED_IN_URL }}" + REDDIT_URL: "{{ MFE_REDDIT_URL }}" + APPLE_APP_STORE_URL: "{{ MFE_APPLE_APP_STORE_URL }}" + GOOGLE_PLAY_URL: "{{ MFE_GOOGLE_PLAY_URL }}" + ENTERPRISE_MARKETING_URL: "{{ MFE_ENTERPRISE_MARKETING_URL }}" + ENTERPRISE_MARKETING_UTM_SOURCE: "{{ MFE_ENTERPRISE_MARKETING_UTM_SOURCE }}" + ENTERPRISE_MARKETING_UTM_CAMPAIGN: "{{ MFE_ENTERPRISE_MARKETING_UTM_CAMPAIGN }}" + ENTERPRISE_MARKETING_FOOTER_UTM_MEDIUM: "{{ MFE_ENTERPRISE_MARKETING_FOOTER_UTM_MEDIUM }}" + ECOMMERCE_BASE_URL: "{{ MFE_ECOMMERCE_BASE_URL }}" + ORDER_HISTORY_URL: "{{ MFE_ORDER_HISTORY_URL }}" + USER_INFO_COOKIE_NAME: "{{ MFE_USER_INFO_COOKIE_NAME }}" + NEW_RELIC_LICENSE_KEY: '{{ MFE_NEW_RELIC_LICENSE_KEY }}' + NEW_RELIC_APP_ID: '{{ MFE_NEW_RELIC_APP_ID }}' + LOGO_URL: "{{ MFE_LOGO_URL }}" + LOGO_TRADEMARK_URL: "{{ MFE_LOGO_TRADEMARK_URL }}" + LOGO_WHITE_URL: "{{ MFE_LOGO_WHITE_URL }}" + FAVICON_URL: "{{ MFE_FAVICON_URL }}" + LANGUAGE_PREFERENCE_COOKIE_NAME: "{{ MFE_LANGUAGE_PREFERENCE_COOKIE_NAME }}" + +MFE_STANDALONE_NGINX: true + +# This variable can be overridden to include extra defaults for all MFEs deployed with the `mfe_deployer` role. +MFE_ENVIRONMENT_DEFAULT_EXTRA: {} +# NOTE: This should be overridden by inheriting MFE-specific role. 
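+# For example, a wrapping role can inject MFE-specific settings here; the value +# below is illustrative (the library_authoring role does this to set STUDIO_BASE_URL): +# +# MFE_ENVIRONMENT_EXTRA: +# STUDIO_BASE_URL: 'https://studio.example.com/' +#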
+MFE_ENVIRONMENT_EXTRA: {} +MFE_ENVIRONMENT: '{{ MFE_ENVIRONMENT_DEFAULT | combine(MFE_ENVIRONMENT_DEFAULT_EXTRA) | combine(MFE_ENVIRONMENT_EXTRA) }}' + +MFE_NPM_OVERRIDES: [] diff --git a/playbooks/roles/mfe/meta/main.yml b/playbooks/roles/mfe/meta/main.yml new file mode 100644 index 00000000000..7f566a08883 --- /dev/null +++ b/playbooks/roles/mfe/meta/main.yml @@ -0,0 +1,14 @@ +--- +dependencies: + - common + - role: add_user + user_name: "{{ MFE_USER }}" + user_home: "{{ MFE_HOME }}" + group_name: "{{ common_web_group }}" + - role: git_clone + repo_owner: "{{ MFE_USER }}" + repo_group: "{{ common_web_group }}" + GIT_REPOS: "{{ MFE_REPOS }}" + git_home: "{{ MFE_HOME }}" + when: MFE_REPOS is defined + diff --git a/playbooks/roles/mfe/tasks/main.yml b/playbooks/roles/mfe/tasks/main.yml new file mode 100644 index 00000000000..cb4aae50257 --- /dev/null +++ b/playbooks/roles/mfe/tasks/main.yml @@ -0,0 +1,140 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role mfe +# +# Overview: +# +# This role performs the operations needed to deploy an MFE +# on a single server. +# +# Example play: +# +# It is recommended to deploy MFEs using the mfe_deployer role instead of using this one directly. +# This role can be used as a dependency by other roles in the meta/main.yml +# file. The including role should add the following dependency definition. +# +# dependencies: +# - role: mfe +# MFE_BASE: "{{ PROFILE_MFE_BASE }}" +# MFE_BASE_URL: "{{ PROFILE_BASE_URL }}" +# MFE_NAME: "{{ PROFILE_REPO_NAME }}" +# MFE_SITE_NAME: "{{ PROFILE_SITE_NAME }}" +# MFE_APP_VERSION: "{{ PROFILE_VERSION }}" +# MFE_NODE_VERSION: "{{ PROFILE_NODE_VERSION }}" +# +# This role can also be used directly in playbooks: +# +#- name: Configure instance(s) +# hosts: all +# become: True +# gather_facts: True +# roles: +# - role: mfe +# MFE_NAME: profile +# - role: mfe +# MFE_NAME: gradebook +# - role: mfe +# MFE_NAME: account +# + +- name: install needed packages + apt: + name: "{{ MFE_DEBIAN_PKGS }}" + state: present + update_cache: true + cache_valid_time: 3600 + tags: + - install + - install:system-requirements + +- name: create nodeenv + command: "nodeenv {{ MFE_NODEENV_DIR }} --node={{ MFE_NODE_VERSION }} --prebuilt" + args: + creates: "{{ MFE_NODEENV_DIR }}" + become_user: "{{ MFE_USER }}" + environment: "{{ MFE_ENVIRONMENT }}" + tags: + - install + - install:system-requirements + +- name: upgrade npm + command: "npm install -g npm@{{ MFE_NPM_VERSION }}" + become_user: "{{ MFE_USER }}" + environment: "{{ MFE_ENVIRONMENT }}" + tags: + - install + - install:system-requirements + +- name: install npm dependencies + shell: "npm install --include=dev --no-save" + args: + chdir: "{{ MFE_CODE_DIR }}" + become_user: "{{ MFE_USER }}" + environment: "{{ MFE_ENVIRONMENT }}" + tags: + - install + - install:app-requirements + +- name: install npm overrides + shell: "npm install {{ item }} --include=dev --no-save" + args: + chdir: "{{ MFE_CODE_DIR }}" + become_user: "{{ MFE_USER }}" + environment: "{{ MFE_ENVIRONMENT }}" + with_items: "{{ MFE_NPM_OVERRIDES }}" + tags: + - install + - install:app-requirements + +- name: build MFE + command: "npm run build" + args: + chdir: "{{ MFE_CODE_DIR }}" + become_user: "{{ MFE_USER }}" + environment: "{{ MFE_ENVIRONMENT }}"
tags: + - install:base + + +- name: Copying nginx configs for the service + template: + src: "edx/app/nginx/sites-available/app.j2" + dest: "{{ nginx_sites_available_dir }}/{{ MFE_NAME }}" + owner: root + group: "{{ common_web_user }}" + mode: 0640 + when: nginx_app_dir is defined and MFE_STANDALONE_NGINX + notify: reload nginx + tags: + - install:base + - install:vhosts + +- name: Creating nginx config links for the service + file: + src: "{{ nginx_sites_available_dir }}/{{ MFE_NAME }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ MFE_NAME }}" + state: link + owner: root + group: root + when: nginx_app_dir is defined and MFE_STANDALONE_NGINX + notify: reload nginx + tags: + - install:base + - install:vhosts + +# If tasks that notify reload nginx don't change the state of the remote system +# their corresponding notifications don't get run. This will ensure that it has +# been reloaded successfully. +- name: make sure nginx has reloaded + service: + name: nginx + state: reloaded diff --git a/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/app.j2 b/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/app.j2 new file mode 100644 index 00000000000..6b11cd67693 --- /dev/null +++ b/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/app.j2 @@ -0,0 +1,46 @@ +# +# {{ ansible_managed }} +# + +server { + server_name {{ MFE_HOSTNAME }}; + listen {{ MFE_NGINX_PORT }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ MFE_NGINX_PORT }}; + {% endif %} + + # Increase accepted header size to account for overenthusiastic usage of cookies + large_client_header_buffers 8 16k; + + +{% if NGINX_ENABLE_SSL %} + {% include "concerns/handle-ip-disclosure.j2" %} + rewrite ^ https://$host$request_uri? permanent; +{% else %} + {% if NGINX_REDIRECT_TO_HTTPS %} + {% include "concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2" %} + {% include "concerns/handle-tls-terminated-elsewhere-redirect.j2" %} + {% else %} + {% include "concerns/mfe-common.j2" %} + {% include "concerns/mfe.j2" %} + {% endif %} +{% endif %} + +} + +{% if NGINX_ENABLE_SSL %} +server { + server_name {{ MFE_HOSTNAME }}; + listen {{ MFE_SSL_NGINX_PORT }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ MFE_SSL_NGINX_PORT }} ssl; + {% endif %} + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + {% include "concerns/mfe-common.j2" %} + + {% include "concerns/mfe.j2" %} +} +{% endif %} diff --git a/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 b/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 new file mode 100644 index 00000000000..bb5880afef1 --- /dev/null +++ b/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 @@ -0,0 +1,17 @@ +{% if MFE_ENABLE_BASIC_AUTH|bool %} + satisfy any; + + allow 127.0.0.1; + + {% for cidr in COMMON_BASIC_AUTH_EXCEPTIONS %} + allow {{ cidr }}; + {% endfor %} + + deny all; + + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + + index index.html + proxy_set_header X-Forwarded-Proto https; +{% endif %} diff --git a/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 b/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 new file mode 100644 index 00000000000..410a542892d --- /dev/null +++ 
b/playbooks/roles/mfe/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 @@ -0,0 +1,12 @@ +# If you are changing this, be warned that it lives in multiple places: +# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS +# version of this in nginx and in mfe role. + +{% if NGINX_ALLOW_PRIVATE_IP_ACCESS %} +# This regexp matches only public IP addresses. +if ($host ~ "(\d+)(? 0 %} +location /robots.txt { + root {{ nginx_app_dir }}; + try_files $uri /robots.txt =404; +} +{% endif %} diff --git a/playbooks/roles/mfe_deployer/README.rst b/playbooks/roles/mfe_deployer/README.rst new file mode 100644 index 00000000000..8798cb11ec0 --- /dev/null +++ b/playbooks/roles/mfe_deployer/README.rst @@ -0,0 +1,204 @@ + +mfe_deployer +############ + +Overview +-------- + +The purpose of this document is to provide an overview of how micro-frontend applications (MFEs) can be deployed using the ansible roles from the edx/configuration repository. + +For a general overview of the specific steps that are required for the deployment, please see the `Developer Documentation`_. + +Two ansible roles were created by the community to deploy MFEs using ``edx/configuration``: + +- ``mfe``: The ``mfe`` role is the base role for deployments, where a single MFE is cloned, built and set up to be served by nginx from the appserver. This role internally follows the steps for building and deploying MFEs described in the developer documentation, with the exception that it does not use tubular. Ansible users should not use this role directly, and should instead use the ``mfe_deployer`` role. +- ``mfe_deployer``: The ``mfe_deployer`` role is used to deploy several MFEs in a programmatic way. Internally, this role calls the base ``mfe`` role once for each MFE to be deployed. + +Configuration & Deployment +-------------------------- + +When running this role, you'll need to set the following variables: + +- ``MFES``: the list of MFEs to deploy. Each entry must contain a *name* and a *repo*. Optionally, the extra parameters below can be defined for each MFE: + - **name** (required): The name of the MFE. When using subdomain deployments, this also defines the subdomain where the MFE is served. + - **repo** (required): The name of the repository to deploy. + - **node_version**: The Node.js version used to build the MFE. By default it takes the value of ``MFE_DEPLOY_NODE_VERSION``. + - **git_protocol**: The protocol used to clone the repo. By default it takes the value of ``MFE_DEPLOY_GIT_PROTOCOL``. + - **git_domain**: The domain of the git repository. By default it takes the value of ``MFE_DEPLOY_GIT_DOMAIN``. + - **git_path**: The git path of the MFE. By default it takes the value of ``MFE_DEPLOY_GIT_PATH``. + - **version**: The version of the MFE to deploy. By default it takes the value of ``MFE_DEPLOY_VERSION``. + - **git_identity**: The git identity used to clone the repository. By default it takes the value of ``MFE_DEPLOY_GIT_IDENTITY``. + - **npm_overrides**: The list of npm overrides to install. Used for branding; see the `Developer Documentation`_ for more details. By default it takes the value of ``MFE_DEPLOY_NPM_OVERRIDES``. + - **env_extra**: Custom environment variables used to build this MFE.
+
+Configuration & Deployment
+--------------------------
+
+When running this role, you'll need to set the following variables:
+
+- ``MFES``: the list of MFEs to deploy. Each entry must contain a *name* and a *repo*; optionally, the extra parameters below can be defined per MFE:
+
+  - **name** (required): The name of the MFE. When using subdomain deployments, it defines the subdomain the MFE is deployed on.
+  - **repo** (required): The name of the repository to deploy.
+  - **node_version**: The Node.js version used to build the MFE. Defaults to ``MFE_DEPLOY_NODE_VERSION``.
+  - **git_protocol**: The protocol used to clone the repo. Defaults to ``MFE_DEPLOY_GIT_PROTOCOL``.
+  - **git_domain**: The domain of the git repository. Defaults to ``MFE_DEPLOY_GIT_DOMAIN``.
+  - **git_path**: The git path of the MFE. Defaults to ``MFE_DEPLOY_GIT_PATH``.
+  - **version**: The version of the MFE to deploy. Defaults to ``MFE_DEPLOY_VERSION``.
+  - **git_identity**: The git identity used to clone the repository. Defaults to ``MFE_DEPLOY_GIT_IDENTITY``.
+  - **npm_overrides**: The list of npm overrides to install; used for branding, see the `Developer Documentation`_ for more details. Defaults to ``MFE_DEPLOY_NPM_OVERRIDES``.
+  - **env_extra**: Custom environment variables used to build this MFE. Defaults to ``MFE_DEPLOY_ENVIRONMENT_EXTRA``.
+  - **public_path**: The path under which the MFE is deployed. This requires the MFE to support deployment in a subdirectory; in the Open edX ecosystem, this means the MFE should use frontend-build>=1.3.2 and frontend-platform>=1.6.1. Defaults to ``MFE_DEPLOY_PUBLIC_PATH``.
+  - **site_name**: Sets the ``SITE_NAME`` environment variable used to build the MFE. Defaults to ``MFE_DEPLOY_SITE_NAME``.
+  - **standalone_nginx**: Whether the MFE is served from its own nginx site file or from a file shared with the other MFEs. Defaults to ``MFE_DEPLOY_STANDALONE_NGINX``.
+
+- ``MFES_ECOMMERCE``: the list of ecommerce-related MFEs to install. Its structure matches that of ``MFES``.
+
+Ecommerce-related MFEs are only built when the ecommerce service is installed. This is controlled by the ``MFE_DEPLOY_ECOMMERCE_MFES`` conditional variable, which is based on ``SANDBOX_ENABLE_ECOMMERCE``.
+
+Deployment using subdirectories
+_______________________________
+
+By default ``MFE_DEPLOY_STANDALONE_NGINX`` is false, which means that all the microfrontends defined in the ``MFES`` configuration are deployed to different ``public_path`` values under the same domain (specifically, ``MFE_DEPLOY_COMMON_HOSTNAME``).
+
+.. code-block:: yaml
+
+    MFES:
+      - name: profile
+        repo: frontend-app-profile
+        public_path: "/profile/"
+      - name: gradebook
+        repo: frontend-app-gradebook
+        public_path: "/gradebook/"
+      - name: account
+        repo: frontend-app-account
+        public_path: "/account/"
+
+    MFES_ECOMMERCE:
+      - name: payment
+        repo: frontend-app-payment
+        public_path: "/payment/"
+      - name: ecommerce
+        repo: frontend-app-ecommerce
+        public_path: "/ecommerce/"
+
+    ### edxapp Configurations
+    ### See comprehensive example below
+
+Make sure that each ``public_path`` starts and ends with a ``/``, and that it is unique for each microfrontend when ``MFE_DEPLOY_STANDALONE_NGINX`` is false.
+
+When deploying using subdirectories, it is also necessary to set ``MFE_BASE`` to the domain shared by the microfrontends.
+
+Deployment using subdomains
+___________________________
+
+To deploy the microfrontends on different subdomains instead, turn on ``MFE_DEPLOY_STANDALONE_NGINX``. As an example, the following configuration is enough to deploy the profile, gradebook, and account microfrontends on separate subdomains.
+
+.. code-block:: yaml
+
+    MFES:
+      - name: profile
+        repo: frontend-app-profile
+      - name: gradebook
+        repo: frontend-app-gradebook
+      - name: account
+        repo: frontend-app-account
+
+    MFES_ECOMMERCE:
+      - name: payment
+        repo: frontend-app-payment
+        public_path: "/payment/"
+      - name: ecommerce
+        repo: frontend-app-ecommerce
+        public_path: "/ecommerce/"
+
+    MFE_DEPLOY_STANDALONE_NGINX: true
+
+    ### edxapp Configurations
+    ### See comprehensive example below
+
+The domain used for each of these MFEs is defined by ``MFE_HOSTNAME``, whose default value is:
+
+.. code-block:: yaml
+
+    MFE_HOSTNAME: '~^((stage|prod)-)?{{ MFE_NAME }}.*'
+
+Custom configurations
+_____________________
+
+As described previously, the MFEs are compiled by the ``mfe`` role, so some configurations cannot be overridden from ``mfe_deployer``; the full list of default environment settings is in the defaults of the ``mfe`` role. Per-MFE settings such as ``env_extra`` can still be set from ``mfe_deployer``, as in the sketch below.
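+
+For instance, a minimal sketch of passing extra build-time environment variables to a single MFE through ``env_extra`` (the ``SUPPORT_URL`` value below is illustrative, not a required setting):
+
+.. code-block:: yaml
+
+    MFES:
+      - name: profile
+        repo: frontend-app-profile
+        public_path: "/profile/"
+        # Hypothetical build-time override for this MFE only
+        env_extra:
+          SUPPORT_URL: "https://support.example.com/"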
+
+The following variables can be overridden for all MFEs, but not individually: ``MFE_MARKETING_SITE_BASE_URL``, ``MFE_ENTERPRISE_MARKETING_UTM_SOURCE``, ``MFE_ENTERPRISE_MARKETING_UTM_CAMPAIGN``, and ``MFE_ENTERPRISE_MARKETING_FOOTER_UTM_MEDIUM`` for settings related to the marketing site, and ``MFE_NEW_RELIC_APP_ID`` and ``MFE_NEW_RELIC_LICENSE_KEY`` to configure the New Relic integration. The default environment variables are defined in the `MFE_ENVIRONMENT_DEFAULT`_ configuration.
+
+LMS Configuration
+_________________
+
+Deploying the MFEs to the appservers is not enough to have them working properly. Most of them communicate with the LMS, so the LMS must be configured to accept requests from the MFEs.
+
+The principal settings needed in ansible are ``EDXAPP_CORS_ORIGIN_WHITELIST``, ``EDXAPP_CSRF_TRUSTED_ORIGINS``, and ``EDXAPP_LOGIN_REDIRECT_WHITELIST``. They should contain the domains of the MFEs so that the LMS accepts their requests.
+
+It is also necessary to enable the ENABLE_CORS_HEADERS and ENABLE_CROSS_DOMAIN_CSRF_COOKIE features. As of Koa, they can be enabled with ``EDXAPP_ENABLE_CORS_HEADERS`` and ``EDXAPP_ENABLE_CROSS_DOMAIN_CSRF_COOKIE``.
+
+For each MFE, there may also be settings that need to be changed according to the URLs of the MFE; for instance, for the gradebook, profile, and account MFEs we need to set ``EDXAPP_LMS_WRITABLE_GRADEBOOK_URL``, ``EDXAPP_PROFILE_MICROFRONTEND_URL``, and ``EDXAPP_ACCOUNT_MICROFRONTEND_URL`` to their respective URLs.
+
+It is also necessary to have JWT configured properly in the LMS. You can use the ``generate_jwt_signing_key`` management command to generate the signing key, as in the sketch below. See the `decision record about asymmetric JWT`_ for more details.
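+
+As a rough sketch (the exact paths and settings module are assumptions that depend on your installation), the key can be generated on an appserver with the same ``manage.edxapp`` wrapper used elsewhere in this repository:
+
+.. code-block:: console
+
+    # Hypothetical invocation; adjust the settings module to your deployment
+    sudo -u edxapp /edx/bin/manage.edxapp lms generate_jwt_signing_key --settings=production
+
+The command prints JWT-related settings that should then be added to the LMS configuration.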
+
+Comprehensive Example of a deployment using subdirectories
+__________________________________________________________
+
+.. code-block:: yaml
+
+    MFE_BASE: "mfe.{{ EDXAPP_LMS_BASE }}"
+
+    MFES:
+      - name: profile
+        repo: frontend-app-profile
+        public_path: "/profile/"
+      - name: gradebook
+        repo: frontend-app-gradebook
+        public_path: "/gradebook/"
+      - name: account
+        repo: frontend-app-account
+        public_path: "/account/"
+
+    MFES_ECOMMERCE:
+      - name: payment
+        repo: frontend-app-payment
+        public_path: "/payment/"
+      - name: ecommerce
+        repo: frontend-app-ecommerce
+        public_path: "/ecommerce/"
+
+    MFE_DEPLOY_STANDALONE_NGINX: false
+    MFE_DEPLOY_COMMON_HOSTNAME: '{{ MFE_BASE }}'
+
+    ## edxapp Configurations
+
+    EDXAPP_SESSION_COOKIE_DOMAIN: ".{{ EDXAPP_LMS_BASE }}"
+    EDXAPP_CSRF_COOKIE_SECURE: true
+    EDXAPP_SESSION_COOKIE_SECURE: true
+    EDXAPP_ENABLE_CORS_HEADERS: true
+    EDXAPP_ENABLE_CROSS_DOMAIN_CSRF_COOKIE: true
+    EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: ".{{ EDXAPP_LMS_BASE }}"
+    EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_NAME: "cross-domain-cookie-mfe"
+
+    EDXAPP_CORS_ORIGIN_WHITELIST:
+      - "{{ EDXAPP_CMS_BASE }}"
+      - "{{ MFE_BASE }}"
+
+    EDXAPP_CSRF_TRUSTED_ORIGINS:
+      - "{{ MFE_BASE }}"
+
+    EDXAPP_LOGIN_REDIRECT_WHITELIST:
+      - "{{ EDXAPP_CMS_BASE }}"
+      - "{{ MFE_BASE }}"
+
+    EDXAPP_SITE_CONFIGURATION:
+      - values:
+          ENABLE_ORDER_HISTORY_MICROFRONTEND: "{{ SANDBOX_ENABLE_ECOMMERCE }}"
+
+    # MFE Links
+    EDXAPP_LMS_WRITABLE_GRADEBOOK_URL: 'https://{{ MFE_BASE }}/gradebook'
+    EDXAPP_PROFILE_MICROFRONTEND_URL: 'https://{{ MFE_BASE }}/profile/u/'
+    EDXAPP_ACCOUNT_MICROFRONTEND_URL: 'https://{{ MFE_BASE }}/account'
+    EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL: 'https://{{ MFE_BASE }}/ecommerce/orders'
+
+    ## ecommerce Configuration
+    ECOMMERCE_CORS_ORIGIN_WHITELIST: [
+      "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ MFE_BASE }}",
+    ]
+    ECOMMERCE_CSRF_TRUSTED_ORIGINS: [
+      "{{ EDXAPP_LMS_BASE_SCHEME }}://{{ MFE_BASE }}",
+    ]
+    ECOMMERCE_CORS_ALLOW_CREDENTIALS: true
+    ECOMMERCE_ENABLE_PAYMENT_MFE: true
+
+.. _decision record about asymmetric JWT: https://github.com/openedx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0008-use-asymmetric-jwts.rst
+.. _Developer Documentation: https://edx.readthedocs.io/projects/edx-developer-docs/en/latest/developers_guide/micro_frontends_in_open_edx.html#overriding-brand-specific-elements
+..
_MFE_ENVIRONMENT_DEFAULT: https://github.com/openedx/configuration/blob/master/playbooks/roles/mfe/defaults/main.yml#L95 diff --git a/playbooks/roles/mfe_deployer/defaults/main.yml b/playbooks/roles/mfe_deployer/defaults/main.yml new file mode 100644 index 00000000000..3edd1c580f6 --- /dev/null +++ b/playbooks/roles/mfe_deployer/defaults/main.yml @@ -0,0 +1,44 @@ +--- +MFE_DEPLOY_NODE_VERSION: '12.13.1' + +MFE_DEPLOY_GIT_PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' +MFE_DEPLOY_GIT_DOMAIN: '{{ COMMON_GIT_MIRROR }}' +MFE_DEPLOY_GIT_PATH: '{{ COMMON_GIT_PATH }}' +MFE_DEPLOY_VERSION: 'master' +MFE_DEPLOY_GIT_IDENTITY: null + +MFES: + - name: profile + repo: frontend-app-profile + public_path: "/profile/" + - name: gradebook + repo: frontend-app-gradebook + public_path: "/gradebook/" + - name: account + repo: frontend-app-account + public_path: "/account/" + +MFES_ECOMMERCE: + - name: payment + repo: frontend-app-payment + public_path: "/payment/" + - name: ecommerce + repo: frontend-app-ecommerce + public_path: "/ecommerce/" + +MFE_DEPLOY_ECOMMERCE_MFES: false +ecommerce_mfes: "{{ MFE_DEPLOY_ECOMMERCE_MFES | ternary(MFES_ECOMMERCE, []) }}" + +# Collect list of all MFEs to deploy +deploy_mfes: "{{ MFES + ecommerce_mfes }}" + +MFE_DEPLOY_PUBLIC_PATH: "/" +MFE_DEPLOY_SITE_NAME: "" + +MFE_DEPLOY_ENVIRONMENT_EXTRA: {} +MFE_DEPLOY_NPM_OVERRIDES: [] + +MFE_DEPLOY_STANDALONE_NGINX: false +MFE_DEPLOY_COMMON_HOSTNAME: "app.*" +MFE_DEPLOY_NGINX_PORT: 80 +MFE_DEPLOY_SSL_NGINX_PORT: 443 diff --git a/playbooks/roles/mfe_deployer/meta/main.yml b/playbooks/roles/mfe_deployer/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/mfe_deployer/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/mfe_deployer/tasks/main.yml b/playbooks/roles/mfe_deployer/tasks/main.yml new file mode 100644 index 00000000000..6497a90116b --- /dev/null +++ b/playbooks/roles/mfe_deployer/tasks/main.yml @@ -0,0 +1,51 @@ +--- + +- name: Build MFE + include_role: + name: mfe + vars: + MFE_NAME: "{{ custom_mfe.name }}" + MFE_REPO: "{{ custom_mfe.repo }}" + MFE_NODE_VERSION: "{{ custom_mfe.node_version | default(MFE_DEPLOY_NODE_VERSION) }}" + MFE_GIT_PROTOCOL: '{{ custom_mfe.git_protocol | default(MFE_DEPLOY_GIT_PROTOCOL) }}' + MFE_GIT_DOMAIN: '{{ custom_mfe.git_domain | default(MFE_DEPLOY_GIT_DOMAIN) }}' + MFE_GIT_PATH: '{{ custom_mfe.git_path | default(MFE_DEPLOY_GIT_PATH) }}' + MFE_VERSION: '{{ custom_mfe.version | default(MFE_DEPLOY_VERSION) }}' + MFE_GIT_IDENTITY: '{{ custom_mfe.git_identity | default(MFE_DEPLOY_GIT_IDENTITY) }}' + MFE_NPM_OVERRIDES: '{{ custom_mfe.npm_overrides | default(MFE_DEPLOY_NPM_OVERRIDES) }}' + MFE_ENVIRONMENT_EXTRA: '{{ custom_mfe.env_extra | default(MFE_DEPLOY_ENVIRONMENT_EXTRA) }}' + MFE_PUBLIC_PATH: '{{ custom_mfe.public_path | default(MFE_DEPLOY_PUBLIC_PATH) }}' + MFE_SITE_NAME: '{{ custom_mfe.site_name | default(MFE_DEPLOY_SITE_NAME) }}' + MFE_STANDALONE_NGINX: '{{ custom_mfe.standalone_nginx | default(MFE_DEPLOY_STANDALONE_NGINX) }}' + loop: "{{ deploy_mfes }}" + loop_control: + loop_var: custom_mfe + tags: + - install:base + +- name: Copying nginx configs for the service + template: + src: "edx/app/nginx/sites-available/app.j2" + dest: "{{ nginx_sites_available_dir }}/mfes" + owner: root + group: "{{ common_web_user }}" + mode: 0640 + when: nginx_app_dir is defined and not MFE_DEPLOY_STANDALONE_NGINX + notify: reload nginx + tags: + - install:base + - install:vhosts + +- name: Creating nginx config links for the 
service
+  file:
+    src: "{{ nginx_sites_available_dir }}/mfes"
+    dest: "{{ nginx_sites_enabled_dir }}/mfes"
+    state: link
+    owner: root
+    group: root
+  when: nginx_app_dir is defined and not MFE_DEPLOY_STANDALONE_NGINX
+  notify: reload nginx
+  tags:
+    - install:base
+    - install:vhosts
+
diff --git a/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/app.j2 b/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/app.j2
new file mode 100644
index 00000000000..f38939d92d5
--- /dev/null
+++ b/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/app.j2
@@ -0,0 +1,39 @@
+#
+# {{ ansible_managed }}
+#
+
+server {
+  server_name {{ MFE_DEPLOY_COMMON_HOSTNAME }};
+  listen {{ MFE_DEPLOY_NGINX_PORT }};
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ MFE_DEPLOY_NGINX_PORT }};
+  {% endif %}
+
+{% if NGINX_ENABLE_SSL %}
+  {% include "concerns/handle-ip-disclosure.j2" %}
+  rewrite ^ https://$host$request_uri? permanent;
+{% else %}
+  {% if NGINX_REDIRECT_TO_HTTPS %}
+    {% include "concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2" %}
+    {% include "concerns/handle-tls-terminated-elsewhere-redirect.j2" %}
+  {% else %}
+    {% include "concerns/mfe.j2" %}
+  {% endif %}
+{% endif %}
+
+}
+
+{% if NGINX_ENABLE_SSL %}
+server {
+  server_name {{ MFE_DEPLOY_COMMON_HOSTNAME }};
+  listen {{ MFE_DEPLOY_SSL_NGINX_PORT }} ssl;
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ MFE_DEPLOY_SSL_NGINX_PORT }} ssl;
+  {% endif %}
+  ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
+  ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
+  add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
+
+  {% include "concerns/mfe.j2" %}
+}
+{% endif %}
diff --git a/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 b/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2
new file mode 100644
index 00000000000..bb5880afef1
--- /dev/null
+++ b/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2
@@ -0,0 +1,17 @@
+{% if MFE_ENABLE_BASIC_AUTH|bool %}
+  satisfy any;
+
+  allow 127.0.0.1;
+
+  {% for cidr in COMMON_BASIC_AUTH_EXCEPTIONS %}
+  allow {{ cidr }};
+  {% endfor %}
+
+  deny all;
+
+  auth_basic "Restricted";
+  auth_basic_user_file {{ nginx_htpasswd_file }};
+
+  index index.html;
+  proxy_set_header X-Forwarded-Proto https;
+{% endif %}
diff --git a/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 b/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2
new file mode 100644
index 00000000000..410a542892d
--- /dev/null
+++ b/playbooks/roles/mfe_deployer/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2
@@ -0,0 +1,12 @@
+# If you are changing this be warned that it lives in multiple places
+# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS
+# version of this in nginx and in mfe role.
+
+{% if NGINX_ALLOW_PRIVATE_IP_ACCESS %}
+# This regexp matches only public IP addresses.
+if ($host ~ "(\d+)(?
0 %} +location /robots.txt { + root {{ nginx_app_dir }}; + try_files $uri /robots.txt =404; +} +{% endif %} diff --git a/playbooks/roles/mfe_flags_setup/defaults/main.yml b/playbooks/roles/mfe_flags_setup/defaults/main.yml new file mode 100644 index 00000000000..d72d9c799ee --- /dev/null +++ b/playbooks/roles/mfe_flags_setup/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +MFE_FLAGS_SETUP_FLAGS_LIST: + - account.redirect_to_microfrontend + - order_history.redirect_to_microfrontend diff --git a/playbooks/roles/mfe_flags_setup/tasks/main.yml b/playbooks/roles/mfe_flags_setup/tasks/main.yml new file mode 100644 index 00000000000..d3573060912 --- /dev/null +++ b/playbooks/roles/mfe_flags_setup/tasks/main.yml @@ -0,0 +1,37 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role mfe_flags_setup +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: Get edxapp waffle flags list + shell: > + {{ edxapp_venv_bin }}/python {{ COMMON_BIN_DIR }}/manage.edxapp lms waffle_flag -l --settings={{ COMMON_EDXAPP_SETTINGS }} + become_user: "{{ edxapp_user }}" + environment: "{{ edxapp_environment }}" + register: edxapp_waffle_flags_list + +- name: Create MFE waffle flag if it does not exist + shell: > + {{ edxapp_venv_bin }}/python {{ COMMON_BIN_DIR }}/manage.edxapp lms waffle_flag {{ item }} --everyone --create --settings={{ COMMON_EDXAPP_SETTINGS }} + become_user: "{{ edxapp_user }}" + environment: "{{ edxapp_environment }}" + when: item not in edxapp_waffle_flags_list.stdout + loop: "{{ MFE_FLAGS_SETUP_FLAGS_LIST }}" diff --git a/playbooks/roles/minos/defaults/main.yml b/playbooks/roles/minos/defaults/main.yml new file mode 100644 index 00000000000..736bee8a087 --- /dev/null +++ b/playbooks/roles/minos/defaults/main.yml @@ -0,0 +1,45 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role minos +# +MINOS_GIT_IDENTITY: !!null + +MINOS_SERVICE_CONFIG: + aws_profile: !!null + aws_region: "{{ MINOS_AWS_REGION }}" + s3_bucket: "{{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }}" + bucket_path: lifecycle/minos + voter_conf_d: "{{ minos_voter_cfg }}" + +MINOS_AWS_REGION: 'us-east-1' +# +# vars are namespace with the module name. 
+# +minos_service_name: minos +minos_data_dir: "{{ COMMON_DATA_DIR }}/{{ minos_service_name }}" +minos_app_dir: "{{ COMMON_APP_DIR }}/{{ minos_service_name }}" +minos_log_dir: "{{ COMMON_LOG_DIR }}/{{ minos_service_name }}" +minos_cfg_file: "{{ COMMON_CFG_DIR }}/minos.yml" +minos_voter_cfg: "{{ COMMON_CFG_DIR }}/{{ minos_service_name }}/conf.d/" +minos_git_ssh: "/tmp/git.sh" +minos_git_identity: "{{ minos_app_dir }}/minos-git-identity" +minos_edx_server_tools_repo: "git@github.com/edx/edx-minos.git" +minos_edx_server_tools_branch: "master" +MINOS_EDX_SERVER_TOOLS_VERSION: "0.4" +minos_requirement: "git+ssh://{{ minos_edx_server_tools_repo }}@{{ minos_edx_server_tools_branch }}#egg=edx-minos" + +# +# OS packages +# + +minos_debian_pkgs: [] + +minos_redhat_pkgs: [] diff --git a/playbooks/roles/minos/meta/main.yml b/playbooks/roles/minos/meta/main.yml new file mode 100644 index 00000000000..ddb7a456ce9 --- /dev/null +++ b/playbooks/roles/minos/meta/main.yml @@ -0,0 +1,29 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role minos +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } +dependencies: + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ minos_service_name }}" + edx_service_with_rendered_config_service_config: "{{ MINOS_SERVICE_CONFIG }}" + edx_service_with_rendered_config_user: root + edx_service_with_rendered_config_home: "{{ minos_app_dir }}" + edx_service_with_rendered_config_packages: + debian: "{{ minos_debian_pkgs }}" + redhat: "{{ minos_redhat_pkgs }}" diff --git a/playbooks/roles/minos/tasks/main.yml b/playbooks/roles/minos/tasks/main.yml new file mode 100644 index 00000000000..058dd968f8e --- /dev/null +++ b/playbooks/roles/minos/tasks/main.yml @@ -0,0 +1,83 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role minos +# +# Overview: +# +# Install the, currently private, minos application +# which determines whether or not it is safe to retire +# a server +# +# Dependencies: +# +# Relies on the common role. 
+# +# Example play: +# +# - name: Deploy minos +# hosts: all +# sudo: True +# gather_facts: True +# vars: +# COMMON_ENABLE_MINOS: True +# roles: +# - common +# - minos +# + +- name: Create minos config directory + file: + path: "{{ minos_voter_cfg }}" + state: directory + owner: root + group: root + mode: "0755" + +- name: Create minos voters configs + template: + dest: "{{ minos_voter_cfg }}/{{ item }}.yml" + src: "edx/etc/minos/conf.d/{{ item }}.yml.j2" + mode: "0755" + owner: root + group: root + with_items: + - "BellwetherVoter" + - "ProccessQuiescenceVoterPython" + - "TrackingLogVoter" + - "ZippedTrackingLogVoter" + - "RolledTrackingLogVoter" + +# Optional auth for git +- name: Create ssh script for git + template: + src: "tmp/git-identity.sh.j2" + dest: "{{ minos_git_ssh }}" + mode: "0750" + +- name: Install read-only ssh key + copy: + content: "{{ MINOS_GIT_IDENTITY }}" + dest: "{{ minos_git_identity }}" + force: yes + mode: "0600" + +- name: Install python custom-requirements + pip: + name: "{{ item }}" + virtualenv: "{{ minos_app_dir }}/venvs/" + state: present + extra_args: "--exists-action w" + version: "{{ MINOS_EDX_SERVER_TOOLS_VERSION }}" + environment: + GIT_SSH: "{{ minos_git_ssh }}" + with_items: + - "{{ minos_requirement }}" diff --git a/playbooks/roles/minos/templates/edx/etc/minos/conf.d/BellwetherVoter.yml.j2 b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/BellwetherVoter.yml.j2 new file mode 100644 index 00000000000..e8fef969d9b --- /dev/null +++ b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/BellwetherVoter.yml.j2 @@ -0,0 +1,2 @@ +BellwetherVoter: + config: \ No newline at end of file diff --git a/playbooks/roles/minos/templates/edx/etc/minos/conf.d/ProccessQuiescenceVoterPython.yml.j2 b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/ProccessQuiescenceVoterPython.yml.j2 new file mode 100644 index 00000000000..e35ef92821f --- /dev/null +++ b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/ProccessQuiescenceVoterPython.yml.j2 @@ -0,0 +1,4 @@ +ProccessQuiescenceVoter: + config: + process_name: 'python' + username: '{{ common_web_user }}' diff --git a/playbooks/roles/minos/templates/edx/etc/minos/conf.d/RolledTrackingLogVoter.yml.j2 b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/RolledTrackingLogVoter.yml.j2 new file mode 100644 index 00000000000..e3f7e169bed --- /dev/null +++ b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/RolledTrackingLogVoter.yml.j2 @@ -0,0 +1,3 @@ +RolledTrackingLogVoter: + config: + tracking_directory: '{{ COMMON_LOG_DIR }}/tracking' \ No newline at end of file diff --git a/playbooks/roles/minos/templates/edx/etc/minos/conf.d/TrackingLogVoter.yml.j2 b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/TrackingLogVoter.yml.j2 new file mode 100644 index 00000000000..5d9e1f34169 --- /dev/null +++ b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/TrackingLogVoter.yml.j2 @@ -0,0 +1,6 @@ +TrackingLogVoter: + config: + aws_profile: !!null + local_directory: '{{ COMMON_LOG_DIR }}/tracking' + s3_bucket: '{{ COMMON_OBJECT_STORE_LOG_SYNC_BUCKET }}' + bucket_path_prefix: 'logs/tracking' diff --git a/playbooks/roles/minos/templates/edx/etc/minos/conf.d/ZippedTrackingLogVoter.yml.j2 b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/ZippedTrackingLogVoter.yml.j2 new file mode 100644 index 00000000000..aa774a5f5fe --- /dev/null +++ b/playbooks/roles/minos/templates/edx/etc/minos/conf.d/ZippedTrackingLogVoter.yml.j2 @@ -0,0 +1,3 @@ +ZippedTrackingLogVoter: + config: + tracking_directory: '{{ COMMON_LOG_DIR 
}}/tracking' \ No newline at end of file diff --git a/playbooks/roles/minos/templates/tmp/git-identity.sh.j2 b/playbooks/roles/minos/templates/tmp/git-identity.sh.j2 new file mode 100644 index 00000000000..374ae08e8f8 --- /dev/null +++ b/playbooks/roles/minos/templates/tmp/git-identity.sh.j2 @@ -0,0 +1,2 @@ +#!/bin/sh +exec /usr/bin/ssh -o StrictHostKeyChecking=no {% if MINOS_GIT_IDENTITY %}-i {{ minos_git_identity }}{% endif %} "$@" diff --git a/playbooks/roles/mongo b/playbooks/roles/mongo new file mode 120000 index 00000000000..689d2cb19b9 --- /dev/null +++ b/playbooks/roles/mongo @@ -0,0 +1 @@ +mongo_3_2 \ No newline at end of file diff --git a/playbooks/roles/mongo/defaults/main.yml b/playbooks/roles/mongo/defaults/main.yml deleted file mode 100644 index f42c4e32193..00000000000 --- a/playbooks/roles/mongo/defaults/main.yml +++ /dev/null @@ -1,41 +0,0 @@ -mongo_logappend: true -mongo_version: 2.4.7 -mongo_port: "27017" -mongo_extra_conf: '' -mongo_key_file: '/etc/mongodb_key' -mongo_repl_set: rs0 -mongo_cluster_members: [] - -mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" -mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" -mongo_user: mongodb - -# Vars Meant to be overridden -MONGO_USERS: - - user: cs_comments_service - password: password - database: cs_comments_service - - user: exdapp - password: password - database: edxapp - -MONGO_CLUSTERED: !!null -MONGO_BIND_IP: 127.0.0.1 - -## - -mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" -mongo_dbpath: "{{ mongo_data_dir }}/mongodb" - -# Have to use this conditional instead of ignore errors -# because the mongo_user module fails and doesn't ginore errors. -mongo_create_users: !!null - -# If the system is running out of an Amazon Web Services -# cloudformation stack, this group name can used to pull out -# the name of the stack the mongo server resides in. 
-mongo_aws_stack_name: "tag_aws_cloudformation_stack-name_" - -# In environments that do not require durability (devstack / Jenkins) -# you can disable the journal to reduce disk usage -mongo_enable_journal: True diff --git a/playbooks/roles/mongo/handlers/main.yml b/playbooks/roles/mongo/handlers/main.yml deleted file mode 100644 index 108f006d984..00000000000 --- a/playbooks/roles/mongo/handlers/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- name: restart mongo - service: name=mongodb state=restarted - diff --git a/playbooks/roles/mongo/tasks/main.yml b/playbooks/roles/mongo/tasks/main.yml deleted file mode 100644 index 701c2256685..00000000000 --- a/playbooks/roles/mongo/tasks/main.yml +++ /dev/null @@ -1,78 +0,0 @@ - ---- -- name: install python pymongo for mongo_user ansible module - pip: > - name=pymongo state=present - version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" - -- name: add the mongodb signing key - apt_key: > - id=7F0CEB10 - url=http://docs.mongodb.org/10gen-gpg-key.asc - state=present - -- name: add the mongodb repo to the sources list - apt_repository: > - repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' - state=present - -- name: install mongo server and recommends - apt: > - pkg=mongodb-10gen={{ mongo_version }} - state=present install_recommends=yes - update_cache=yes - -- name: create mongo dirs - file: > - path="{{ item }}" state=directory - owner="{{ mongo_user }}" - group="{{ mongo_user }}" - with_items: - - "{{ mongo_data_dir }}" - - "{{ mongo_dbpath }}" - - "{{ mongo_log_dir }}" - -- name: stop mongo service - service: name=mongodb state=stopped - -- name: move mongodb to {{ mongo_data_dir }} - command: mv /var/lib/mongodb {{ mongo_data_dir}}/. creates={{ mongo_data_dir }}/mongodb - - -- name: copy mongodb key file - copy: > - src={{ secure_dir }}/files/mongo_key - dest={{ mongo_key_file }} - mode=0600 - owner=mongodb - group=mongodb - when: MONGO_CLUSTERED - -- name: copy configuration template - template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes - notify: restart mongo - -- name: start mongo service - service: name=mongodb state=started - -- name: wait for mongo server to start - wait_for: port=27017 delay=2 - -- name: Create the file to initialize the mongod replica set - template: src=repset_init.j2 dest=/tmp/repset_init.js - when: MONGO_CLUSTERED - -- name: Initialize the replication set - shell: /usr/bin/mongo /tmp/repset_init.js - when: MONGO_CLUSTERED - -# Ignore errors doesn't work because the module throws an exception -# it doesn't catch. -- name: create a mongodb user - mongodb_user: > - database={{ item.database }} - name={{ item.user }} - password={{ item.password }} - state=present - with_items: MONGO_USERS - when: mongo_create_users diff --git a/playbooks/roles/mongo/templates/repset_init.j2 b/playbooks/roles/mongo/templates/repset_init.j2 deleted file mode 100644 index 0f08f189df5..00000000000 --- a/playbooks/roles/mongo/templates/repset_init.j2 +++ /dev/null @@ -1,50 +0,0 @@ -{# Generate a list of hosts if no cluster members are give. Otherwise use the - hosts provided in the variable. 
-#} -{%- if mongo_cluster_members|length == 0 -%} - {%- set hosts = [] -%} - {%- set all_mongo_hosts = [] -%} - {%- do all_mongo_hosts.extend(groups.tag_role_mongo) -%} - {%- do all_mongo_hosts.extend(groups.tag_group_mongo) -%} - {%- for name in group_names -%} - {%- if name.startswith(mongo_aws_stack_name) -%} - {%- for host in all_mongo_hosts -%} - {%- if host in groups[name] -%} - {% do hosts.append("ip-" + host.replace('.','-') + ":" + mongo_port) %} - {%- endif -%} - {%- endfor -%} - {%- endif -%} - {%- endfor -%} -{%- else -%} - {%- set hosts = mongo_cluster_members -%} -{%- endif -%} - -config = {_id: '{{ mongo_repl_set }}', members: [ - {%- for host in hosts -%} - {_id: {{ loop.index }}, host: '{{ host }}'}{% if not loop.last %},{% endif %} - {%- endfor -%} - ]}; -rs.initiate(config) - -sleep(30000) -rs.slaveOk() -printjson(rs.status()) - -// Check that the cluster is ok -if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';} - -// Check that the cluster has the right number of members -// and add them if we are the master -if(rs.isMaster().ismaster) { - if(rs.status().members.length!={{ hosts|length }}) { - {% for host in mongo_cluster_members %} - rs.add({_id: {{ loop.index }}, host: '{{ host }}'}); - {% endfor %} - sleep(30000); - // Check status and member account, throw exception if not - if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';} - if(rs.status().members.length!={{ hosts|length }}) { - throw 'Could not add all members to cluster' - } - } -} diff --git a/playbooks/roles/mongo_2_6/defaults/main.yml b/playbooks/roles/mongo_2_6/defaults/main.yml new file mode 100644 index 00000000000..768b9a59450 --- /dev/null +++ b/playbooks/roles/mongo_2_6/defaults/main.yml @@ -0,0 +1,68 @@ +mongo_logappend: true +MONGO_VERSION: 2.6.5 +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' +mongo_repl_set: rs0 +mongo_cluster_members: [] +PYMONGO_VERSION: 2.7.2 + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_APT_KEY: "7F0CEB10" +MONGODB_APT_KEYSERVER: "keyserver.ubuntu.com" +MONGODB_REPO: "deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen" + +# Vars Meant to be overridden +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'password' +MONGO_USERS: + - user: cs_comments_service + password: password + database: cs_comments_service + roles: readWrite + - user: edxapp + password: password + database: edxapp + roles: readWrite + +MONGO_CLUSTERED: false +MONGO_BIND_IP: 127.0.0.1 +MONGO_AUTH: true + +MONGO_USE_SMALLFILES: true + +## + +mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" +mongo_dbpath: "{{ mongo_data_dir }}/mongodb" + +# If the system is running out of an Amazon Web Services +# cloudformation stack, this group name can used to pull out +# the name of the stack the mongo server resides in. +mongo_aws_stack_name: "tag_aws_cloudformation_stack-name_" + +# In environments that do not require durability (devstack / Jenkins) +# you can disable the journal to reduce disk usage +mongo_enable_journal: True + +# We can do regular backups of MongoDB to S3. 
+MONGO_S3_BACKUP: false +# backup cron time: +MONGO_S3_BACKUP_HOUR: "*/12" +MONGO_S3_BACKUP_DAY: "*" +# override with a secondary node that will perform backups +MONGO_S3_BACKUP_NODE: "undefined" +# back up data into a specific S3 bucket +MONGO_S3_BACKUP_BUCKET: "undefined" +# temporary directory mongodump will use to store data +MONGO_S3_BACKUP_TEMPDIR: "{{ mongo_data_dir }}" +MONGO_S3_NOTIFY_EMAIL: "dummy@example.com" +mongo_s3_logfile: "{{ COMMON_LOG_DIR }}/mongo/s3-mongo-backup.log" +MONGO_S3_S3CMD_CONFIG: "{{ COMMON_DATA_DIR }}/mongo-s3-backup.s3cfg" +MONGO_S3_BACKUP_AWS_ACCESS_KEY: !!null +MONGO_S3_BACKUP_AWS_SECRET_KEY: !!null + diff --git a/playbooks/roles/mongo_2_6/handlers/main.yml b/playbooks/roles/mongo_2_6/handlers/main.yml new file mode 100644 index 00000000000..e5778babcfb --- /dev/null +++ b/playbooks/roles/mongo_2_6/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart mongo + service: + name: mongod + state: restarted + diff --git a/playbooks/roles/mongo_2_6/meta/main.yml b/playbooks/roles/mongo_2_6/meta/main.yml new file mode 100644 index 00000000000..2083f0e1251 --- /dev/null +++ b/playbooks/roles/mongo_2_6/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/playbooks/roles/mongo_2_6/tasks/main.yml b/playbooks/roles/mongo_2_6/tasks/main.yml new file mode 100644 index 00000000000..c3b6dfe6ed1 --- /dev/null +++ b/playbooks/roles/mongo_2_6/tasks/main.yml @@ -0,0 +1,274 @@ +--- +- name: Check to see that MongoDB 2.4 is not installed + stat: + path: /etc/init.d/mongodb + register: mongodb_needs_upgrade + tags: + - install + - install:base + +- name: Verify 2.4 not installed + fail: + msg: "MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6." 
+ when: mongodb_needs_upgrade.stat.exists and MONGO_CLUSTERED + tags: + - install + - install:base + +- name: Remove mongo 2.4 if present + apt: + pkg: mongodb-10gen + state: absent + purge: yes + force: yes + when: mongodb_needs_upgrade.stat.exists and not MONGO_CLUSTERED + tags: + - install + - install:base + +- name: Install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - install + - install:base + +- name: Add the mongodb signing key + apt_key: + id: "{{ MONGODB_APT_KEY }}" + keyserver: "{{ MONGODB_APT_KEYSERVER }}" + state: present + register: add_mongo_signing_key + retries: 3 + tags: + - install + - install:base + until: add_mongo_signing_key is succeeded + +- name: Add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - install + - install:base + +- name: Install mongo server and recommends + apt: + name: "mongodb-org={{ MONGO_VERSION }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + tags: + - install + - install:base + +- name: Create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - install + - install:base + +- name: Add mongod systemd configuration on 16.04 + template: + src: "etc/systemd/system/mongod.service.j2" + dest: "/etc/systemd/system/mongod.service" + notify: + - restart mongo + when: ansible_distribution_release == 'xenial' + tags: + - install + - install:configuration + +- name: enable mongod systemd unit on 16.04 + systemd: + name: mongod + enabled: yes + daemon_reload: yes + when: ansible_distribution_release == 'xenial' + tags: + - install + - install:configuration + +- name: Stop mongod service + service: + name: mongod + state: stopped + tags: + - manage + - manage:stop + +- name: Move mongodb to {{ mongo_data_dir }} + command: "mv /var/lib/mongodb {{ mongo_data_dir}}/." 
+  args:
+    creates: "{{ mongo_data_dir }}/mongodb"
+  tags:
+    - install
+    - install:base
+
+- name: Copy mongodb key file
+  copy:
+    content: "{{ MONGO_CLUSTER_KEY }}"
+    dest: "{{ mongo_key_file }}"
+    mode: "0600"
+    owner: mongodb
+    group: mongodb
+  when: MONGO_CLUSTERED
+  tags:
+    - install
+    - install:configuration
+
+- name: Copy configuration template
+  template:
+    src: "mongodb.conf.j2"
+    dest: "/etc/mongod.conf"
+    backup: yes
+  notify:
+    - restart mongo
+  tags:
+    - install
+    - install:configuration
+
+- name: Start mongo service
+  service:
+    name: mongod
+    state: started
+  tags:
+    - manage
+    - manage:start
+
+- name: Wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  tags:
+    - manage
+    - manage:start
+
+- name: Drop super user script
+  template:
+    src: "create_root.js.j2"
+    dest: "/tmp/create_root.js"
+  when: not MONGO_CLUSTERED
+  tags:
+    - install
+    - install:configuration
+
+- name: Create super user with js
+  shell: "/usr/bin/mongo admin /tmp/create_root.js"
+  when: not MONGO_CLUSTERED
+  tags:
+    - install
+    - install:configuration
+
+- name: Delete super user script
+  file:
+    path: /tmp/create_root.js
+    state: absent
+  when: not MONGO_CLUSTERED
+  tags:
+    - install
+    - install:configuration
+
+- name: Create the file to initialize the mongod replica set
+  template:
+    src: "repset_init.js.j2"
+    dest: "/tmp/repset_init.js"
+  when: MONGO_CLUSTERED
+  tags:
+    - install
+    - install:configuration
+
+- name: Initialize the replication set
+  shell: "/usr/bin/mongo /tmp/repset_init.js"
+  when: MONGO_CLUSTERED
+  tags:
+    - install
+    - install:configuration
+
+#- name: delete repset script
+#  file: path=/tmp/repset_init.js state=absent
+#  when: MONGO_CLUSTERED
+
+- name: Create a mongodb user
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+  with_items: "{{ MONGO_USERS }}"
+  when: not MONGO_CLUSTERED
+  tags:
+    - manage
+    - manage:app-users
+
+- name: Create a mongodb user
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ mongo_repl_set }}"
+  with_items: "{{ MONGO_USERS }}"
+  when: MONGO_CLUSTERED
+  tags:
+    - manage
+    - manage:app-users
+
+- name: Install s3cmd
+  apt:
+    name: "s3cmd"
+    state: present
+  when: MONGO_S3_BACKUP
+  tags:
+    - install
+    - install:app-requirements
+
+- name: Configure s3cmd and install backup-mongo-to-s3 script
+  template:
+    dest: "{{ item.dest }}"
+    src: "{{ item.src }}"
+    owner: root
+    group: root
+    mode: "{{ item.mode }}"
+  when: MONGO_S3_BACKUP
+  with_items:
+    - { src: 'mongo-s3-backup-s3cfg.j2', dest: '{{ MONGO_S3_S3CMD_CONFIG }}', mode: '0600' }
+    - { src: 'backup-mongo-to-s3.j2', dest: '/edx/bin/backup-mongo-to-s3.sh', mode: '0700' }
+  tags:
+    - install
+    - install:configuration
+
+- name: Schedule backup-mongo-to-s3 crontab
+  cron:
+    name: "backup-mongo-to-s3"
+    job: "/edx/bin/backup-mongo-to-s3.sh"
+    backup: yes
+    cron_file: backup-mongo-to-s3
+    user: root
+    hour: "{{ MONGO_S3_BACKUP_HOUR }}"
+    minute: "0"
+    day: "{{ MONGO_S3_BACKUP_DAY }}"
+  when: MONGO_S3_BACKUP
+  tags:
+    - install
+    - install:configuration
diff --git a/playbooks/roles/mongo_2_6/templates/backup-mongo-to-s3.j2 b/playbooks/roles/mongo_2_6/templates/backup-mongo-to-s3.j2
new file mode
100644 index 00000000000..c44d19aa8cc --- /dev/null +++ b/playbooks/roles/mongo_2_6/templates/backup-mongo-to-s3.j2 @@ -0,0 +1,68 @@ +{% set lb = '{' %} +{% set rb = '}' %} +#!/bin/bash +# + +exec > >(tee "{{ mongo_s3_logfile }}") +exec 2>&1 + +shopt -s extglob + +usage() { + + cat< /sys/kernel/mm/transparent_hugepage/enabled + echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag +end script diff --git a/playbooks/roles/mongo_3_0/handlers/main.yml b/playbooks/roles/mongo_3_0/handlers/main.yml new file mode 100644 index 00000000000..83a9ecdaf94 --- /dev/null +++ b/playbooks/roles/mongo_3_0/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: restart mongo + service: name=mongod state=restarted + diff --git a/playbooks/roles/mongo_3_0/meta/main.yml b/playbooks/roles/mongo_3_0/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_3_0/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_3_0/tasks/main.yml b/playbooks/roles/mongo_3_0/tasks/main.yml new file mode 100644 index 00000000000..762813e9b41 --- /dev/null +++ b/playbooks/roles/mongo_3_0/tasks/main.yml @@ -0,0 +1,350 @@ +--- +- name: disable transparent huge pages on startup (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: disable-transparent-hugepages.conf + dest: /etc/init/disable-transparent-hugepages.conf + owner: root + group: root + mode: 0755 + tags: + - "hugepages" + - "install" + - "install:system-requirements" + +- name: disable transparent huge pages + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "install" + - "install:system-requirements" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:system-requirements" + +- name: add the mongodb signing key + apt_key: + id: "{{ MONGODB_APT_KEY }}" + keyserver: "{{ MONGODB_APT_KEYSERVER }}" + state: present + tags: + - "install" + - "install:system-requirements" + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:system-requirements" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - install + - install:app-requirements + - mongo_packages + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:configuration" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:configuration" + +# This will 
error when run on a new replica set, so we ignore_errors +# and connect anonymously next. +- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + when: MONGO_CLUSTERED + ignore_errors: true + tags: + - "install" + - "install:configuration" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: MONGO_CLUSTERED and authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "install" + - "install:configuration" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:configuration" + - "update_mongod_conf" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: MONGO_CLUSTERED + and authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "install" + - "install:configuration" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: MONGO_CLUSTERED and initialize_replica_set + tags: + - "install" + - "install:configuration" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + when: MONGO_CLUSTERED + notify: restart mongo + tags: + - "install" + - "install:configuration" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + notify: restart mongo + register: update_mongod_conf + tags: + - "install" + - "install:configuration" + - "update_mongod_conf" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "install" + - "install:configuration" + - "logrotate" + +- name: restart mongo service if we changed our configuration + service: + name: mongod + state: restarted + when: update_mongod_conf.changed + tags: + - "install" + - "install:configuration" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "install" + - "install:configuration" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. 
+- name: create super user
+  mongodb_user:
+    name: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    database: admin
+    roles: root
+  when: initialize_replica_set
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+
+- name: create super user
+  mongodb_user:
+    name: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    database: admin
+    roles: root
+  run_once: true
+  when: not initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+
+# Now that the localhost exemption has been used to create the superuser, we need
+# to add replica set to our configuration. This will never happen if we detected
+# a replica set in the 'determine if there is a replica set already' task.
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: MONGO_CLUSTERED and initialize_replica_set
+  tags:
+    - "install"
+    - "install:configuration"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: MONGO_CLUSTERED and initialize_replica_set
+  tags:
+    - "install"
+    - "install:configuration"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: MONGO_CLUSTERED and initialize_replica_set
+  tags:
+    - "install"
+    - "install:configuration"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: MONGO_CLUSTERED and initialize_replica_set
+  tags:
+    - "install"
+    - "install:configuration"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: MONGO_CLUSTERED
+  tags:
+    - "manage"
+    - "manage:db"
+    - "configure_replica_set"
+
+# During initial replica set configuration, it can take a few seconds to elect
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_default_ipv4['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + retries: 5 + delay: 2 + run_once: true + when: MONGO_CLUSTERED + tags: + - "manage" + - "manage:db" + +- name: create mongodb users in a replica set + mongodb_user: + database: "{{ item.database }}" + login_database: 'admin' + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + replica_set: "{{ MONGO_REPL_SET }}" + with_items: "{{ MONGO_USERS }}" + run_once: true + when: MONGO_CLUSTERED + tags: + - "manage" + - "manage:db" + +- name: create mongodb users in a standalone configuration + mongodb_user: + database: "{{ item.database }}" + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + with_items: "{{ MONGO_USERS }}" + when: not MONGO_CLUSTERED + tags: + - "manage" + - "manage:db" diff --git a/playbooks/roles/mongo_3_0/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_3_0/templates/log-mongo-serverStatus.sh.j2 new file mode 100644 index 00000000000..04649d55ad1 --- /dev/null +++ b/playbooks/roles/mongo_3_0/templates/log-mongo-serverStatus.sh.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_3_0/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_0/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..7086a9a96ff --- /dev/null +++ b/playbooks/roles/mongo_3_0/templates/mongo_logrotate.j2 @@ -0,0 +1,30 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_3_0/templates/mongod.conf.j2 b/playbooks/roles/mongo_3_0/templates/mongod.conf.j2 new file mode 100644 index 00000000000..5e692f9f6f4 --- /dev/null +++ b/playbooks/roles/mongo_3_0/templates/mongod.conf.j2 @@ -0,0 +1,50 @@ +# Do not edit this file directly, it was generated by ansible +# mongodb.conf + + +storage: + # Where to store the data. 
+ dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if MONGO_CLUSTERED and not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: +{% if not MONGO_CLUSTERED %} + {# Bind to all ips(default) if in clustered mode, + otherwise only to the specified local ip. #} + bindIp: {{ MONGO_BIND_IP }} +{% endif %} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_3_2/defaults/main.yml b/playbooks/roles/mongo_3_2/defaults/main.yml new file mode 100644 index 00000000000..9a10a9adb05 --- /dev/null +++ b/playbooks/roles/mongo_3_2/defaults/main.yml @@ -0,0 +1,115 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "3.2" +MONGO_VERSION_PATCH: "16" +PYMONGO_VERSION: "3.2.2" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +# mongo 3.2 does not have any source list for Bionic and Focal +# use Xenial repo source list to install mongo 3.2 +MONGODB_REPO_XENIAL: "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be overridden +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'password' +MONGO_USERS: + - user: cs_comments_service + password: password + database: cs_comments_service + roles: readWrite + - user: edxapp + password: password + database: edxapp + roles: readWrite + +# This default setting is approriate for a single machine installation +# This will need to be overridden for setups where mongo is on its own server +# and/or you are configuring mongo replication. If the override value is +# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a +# specific IP. 
+MONGO_BIND_IP: 127.0.0.1 + +MONGO_REPL_SET: "rs0" +MONGO_AUTH: true + +MONGO_CLUSTER_KEY: "CHANGEME" + +# Cluster member configuration +# Fed directly into mongodb_replica_set module +MONGO_RS_CONFIG: + _id: '{{ MONGO_REPL_SET }}' + members: + - host: '127.0.0.1' + +# Storage engine options in 3.2: "mmapv1" or "wiredTiger" +# 3.2 and 3.4 default to wiredTiger +MONGO_STORAGE_ENGINE: "wiredTiger" + +# List of dictionaries as described in the mount_ebs role's default +# for the volumes. +# Useful if you want to store your mongo data and/or journal on separate +# disks from the root volume. By default, they will end up mongo_data_dir +# on the root disk. +MONGO_VOLUMES: [] + +# WiredTiger takes a number of optional configuration settings +# which can be defined as a yaml structure in your secure configuration. +MONGO_STORAGE_ENGINE_OPTIONS: !!null + +mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" +mongo_dbpath: "{{ mongo_data_dir }}/mongodb" + +# In environments that do not require durability (devstack / Jenkins) +# you can disable the journal to reduce disk usage +mongo_enable_journal: true + +MONGO_LOG_SERVERSTATUS: true + +# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump +# to backup the database to an ebs volume at a period set by mongo_backup_cron. +# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your +# setup. +MONGO_BACKUP_ENABLED: false +MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups +MONGO_BACKUP_EBS_VOLUME_DEVICE: "" +MONGO_BACKUP_EBS_VOLUME_ID: "" +MONGO_BACKUP_AUTH_DATABASE: "" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days" +MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup. +MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup" +MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup" +mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh" +mongo_backup_cron: + minute: '12' + hour: '*/12' + day: '*' + month: '*' + weekday: '*' + +# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not +# manually override. 
+is_backup_node: false diff --git a/playbooks/roles/mongo_3_2/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_3_2/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_3_2/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_3_2/files/etc/systemd/system/mongod.service.d/restart.conf b/playbooks/roles/mongo_3_2/files/etc/systemd/system/mongod.service.d/restart.conf new file mode 100644 index 00000000000..e072e8619c2 --- /dev/null +++ b/playbooks/roles/mongo_3_2/files/etc/systemd/system/mongod.service.d/restart.conf @@ -0,0 +1,3 @@ +[Service] +Restart=always +RestartSec=5 diff --git a/playbooks/roles/mongo_3_2/meta/main.yml b/playbooks/roles/mongo_3_2/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_3_2/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_3_2/tasks/main.yml b/playbooks/roles/mongo_3_2/tasks/main.yml new file mode 100644 index 00000000000..62981d9cf50 --- /dev/null +++ b/playbooks/roles/mongo_3_2/tasks/main.yml @@ -0,0 +1,459 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +# add Bionic source list to install libssl1.0.0 on Focal +# so mongo3.2 installation does not fail with dependencies +- name: add source list to install libssl1.0.0 + apt_repository: + repo: "deb http://security.ubuntu.com/ubuntu bionic-security main" + state: present + tags: + - "install" + - "install:app-requirements" + when: ansible_distribution_release == 'focal' + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + when: ansible_distribution_release != 'focal' + +# mongo 3.2 does not have any source list for Bionic and Focal +# use Xenial repo source list 
to install mongo 3.2 +- name: add the mongodb repo to the sources list for focal + apt_repository: + repo: "{{ MONGODB_REPO_XENIAL }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + when: ansible_distribution_release == 'focal' + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. +- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. 
+# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. +- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. 
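The replica set bootstrapped by the tasks above is driven by a handful of overridable vars. A minimal sketch of a three-member override in secure configuration (hostnames, addresses, and the key are hypothetical placeholders, not defaults):

    MONGO_REPL_SET: "rs0"
    MONGO_RS_CONFIG:
      _id: 'rs0'
      members:
        - host: '10.0.0.10'
        - host: '10.0.0.11'
        - host: '10.0.0.12'
    # members must be able to reach each other, so loopback-only binding won't do
    MONGO_BIND_IP: "0.0.0.0"
    # every member must share the same key, e.g. one generated once with:
    # openssl rand -base64 756
    MONGO_CLUSTER_KEY: "<long random string shared by all members>"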
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set + set_fact: + skip_replica_set: false + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: re-copy configuration template with replica set enabled + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: restart mongo service + service: + name: mongod + state: restarted + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: configure replica set + mongodb_replica_set: + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + rs_config: "{{ MONGO_RS_CONFIG }}" + run_once: true + register: replset_status + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + - "manage:db-replication-configuration" + +# During initial replica set configuration, it can take a few seconds to vote +# a primary and for all members to reflect that status. During that window, +# use creation or other writes can fail. The best wait/check seems to be repeatedly +# checking the replica set status until we see a PRIMARY in the results. +- name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + when: mongo_configure_replica_set + retries: 5 + delay: 2 + run_once: true + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + +- name: create mongodb users in a replica set + mongodb_user: + database: "{{ item.database }}" + login_database: 'admin' + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + replica_set: "{{ MONGO_REPL_SET }}" + with_items: "{{ MONGO_USERS }}" + run_once: true + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-users" + - "manage:db-replication" + +- name: systemd mongod service override dir exists + file: + path: "/etc/systemd/system/mongod.service.d/" + state: directory + owner: root + group: root + mode: 0755 + tags: + - "install" + - "install:configuration" + +- name: copy mongod service override file + copy: + src: "etc/systemd/system/mongod.service.d/restart.conf" + dest: "/etc/systemd/system/mongod.service.d/restart.conf" + owner: root + group: root + mode: 0644 + register: mongod_service_override + tags: + - "install" + - "install:configuration" + +- name: reload systemd if override files changed + command: + cmd: "systemctl daemon-reload" + when: mongod_service_override.changed + tags: + - "install" + - "install:configuration" + +- name: ensure mongo starts at boot time + service: + name: mongod + enabled: yes + tags: + - "manage" + - "manage:start" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - 
"install:app-configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/mongo_3_2/templates/backup-mongo.sh.j2 b/playbooks/roles/mongo_3_2/templates/backup-mongo.sh.j2 new file mode 100644 index 00000000000..61ea25d005d --- /dev/null +++ b/playbooks/roles/mongo_3_2/templates/backup-mongo.sh.j2 @@ -0,0 +1,133 @@ +#!/bin/bash +# ref https://tasks.opencraft.com/browse/SE-1669 +# Script to perform a point-in-time dump of the local mongodb database using +# mongodump. +# includes locking (prevent this script from running multiple times in +# parallel), creating a snapshot of the volume used to backup to + +# exit by default on failure +set -e +# verbose for help with debugging +set -x + +# make sure local/bin is in the path so we can use aws cli +PATH="$PATH:/usr/local/bin" + +# vars set by ansible +MONGO_BACKUP_EBS_VOLUME_DEVICE="{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" # eg. /dev/svdk or /dev/disk/by-label/mylabel +MONGO_BACKUP_EBS_VOLUME_ID="{{ MONGO_BACKUP_EBS_VOLUME_ID }}" # eg. vol-123456 +MONGO_BACKUP_VOLUME_MOUNT_PATH="{{ MONGO_BACKUP_VOLUME_MOUNT_PATH }}" # eg. /mnt/mongobackup/ +MONGO_BACKUP_NODE="{{ MONGO_BACKUP_NODE }}" +EDXAPP_MONGO_DB_NAME="{{ EDXAPP_MONGO_DB_NAME }}" +MONGO_BACKUP_AUTH_DATABASE="{{ MONGO_BACKUP_AUTH_DATABASE }}" +MONGO_ADMIN_USER="{{ MONGO_ADMIN_USER }}" +MONGO_ADMIN_PASSWORD="{{ MONGO_ADMIN_PASSWORD }}" +AWS_ACCESS_KEY_ID="{{ MONGO_BACKUP_AWS_ACCESS_KEY_ID }}" +AWS_SECRET_ACCESS_KEY="{{ MONGO_BACKUP_AWS_SECRET_ACCESS_KEY }}" +MONGO_BACKUP_SNAPSHOT_DESC="{{ MONGO_BACKUP_SNAPSHOT_DESC }}" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE="{{ MONGO_BACKUP_PRUNE_OLDER_THAN_DATE }}" +MONGO_BACKUP_SNITCH_URL="{{ MONGO_BACKUP_SNITCH_URL }}" +aws_region="{{ aws_region }}" + +# export to make available to aws cli +export AWS_ACCESS_KEY_ID +export AWS_SECRET_ACCESS_KEY + +# other vars +archive_path="mongo-backup-$(date --iso-8601=minutes --utc)" + +# verify required variables are set +required() { + if [ -z "$1" ]; then + echo "$2" + required_var_missing="yes" + fi +} +required "$MONGO_BACKUP_EBS_VOLUME_DEVICE" "MONGO_BACKUP_EBS_VOLUME_DEVICE missing; EBS volume device path is required" +required "$MONGO_BACKUP_EBS_VOLUME_ID" "MONGO_BACKUP_EBS_VOLUME_ID missing; EBS volume id is required" +required "$MONGO_BACKUP_VOLUME_MOUNT_PATH" "MONGO_BACKUP_VOLUME_MOUNT_PATH missing; path on which to mount ebs backup volume is required" +required "$MONGO_BACKUP_NODE" "MONGO_BACKUP_NODE missing; this must be set to determine if this is the correct node to run backup on" +required "$MONGO_BACKUP_AUTH_DATABASE" "MONGO_BACKUP_AUTH_DATABASE missing; this must be set to use the correct authenticationDatabase to auth against" +required "$MONGO_ADMIN_USER" "MONGO_ADMIN_USER missing; this must be set to auth against the database" +required "$MONGO_ADMIN_PASSWORD" "MONGO_ADMIN_PASSWORD missing; this must be set to auth against the database" +required "$AWS_ACCESS_KEY_ID" "MONGO_BACKUP_AWS_ACCESS_KEY_ID missing; this must be set to auth against the database" +required "$AWS_SECRET_ACCESS_KEY" "MONGO_BACKUP_AWS_SECRET_ACCESS_KEY missing; this must be set to auth against the database" +required "$aws_region" "aws_region missing; this must be set to use awscli" +[ -n "$required_var_missing" ] && exit 1 + + +# only run on specified node - this 
pulls the node name (IP address) of the mongod on this instance
+mynodename=$(echo "db.isMaster()" | mongo -u "$MONGO_ADMIN_USER" -p"$MONGO_ADMIN_PASSWORD" --authenticationDatabase "$MONGO_BACKUP_AUTH_DATABASE" "$EDXAPP_MONGO_DB_NAME" | grep \"me\" | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//');
+if [ "$mynodename" != "$MONGO_BACKUP_NODE" ]; then
+    echo "This is not the backup host. Run on a different instance."
+    exit 1
+fi
+
+# Acquire the backup lock, using this script itself as the lockfile. If another
+# backup task is already running, then exit immediately.
+exec 200<$0
+flock -n 200 || { echo "Another backup task is already running."; exit 1; }
+
+echo "Starting at $(date)"
+
+# ensure the backup volume is mounted
+mkdir -p "$MONGO_BACKUP_VOLUME_MOUNT_PATH"
+if ! mountpoint -q "$MONGO_BACKUP_VOLUME_MOUNT_PATH"; then
+    mount -o discard,defaults,noatime "$MONGO_BACKUP_EBS_VOLUME_DEVICE" "$MONGO_BACKUP_VOLUME_MOUNT_PATH"
+fi
+
+# Clean old backup files to save space and so we start afresh.
+rm -rf "$MONGO_BACKUP_VOLUME_MOUNT_PATH/mongo/"
+mkdir -p "$MONGO_BACKUP_VOLUME_MOUNT_PATH/mongo/"
+
+
+# create the dump
+# XXX: we may want to check how this lays out on disk and how it will play against EBS volume snapshots. The idea was
+# that snapshots would be cheap because you only pay for data blocks that have changed between one snapshot and the
+# next. However, if the mongodump -> gzip process ends up not being consistent in layout,
+# this may end up with a lot of the disk content changing between snapshots.
+mongodump --host="localhost" --oplog --gzip -u "$MONGO_ADMIN_USER" --password "$MONGO_ADMIN_PASSWORD" --authenticationDatabase "$MONGO_BACKUP_AUTH_DATABASE" --out="$MONGO_BACKUP_VOLUME_MOUNT_PATH/mongo/$archive_path"
+
+
+# flush everything to disk, then unmount the volume so it is ready to snapshot
+sync
+umount "$MONGO_BACKUP_VOLUME_MOUNT_PATH"
+
+# create a snapshot of the volume
+snapshot_data=$(aws --region "$aws_region" ec2 create-snapshot --volume-id "$MONGO_BACKUP_EBS_VOLUME_ID" --description "$MONGO_BACKUP_SNAPSHOT_DESC")
+echo "$snapshot_data"
+snapshot_id="$(echo "$snapshot_data" | jq -r .SnapshotId)"
+
+# Poll (in the loop below) until the snapshot has been created. We want to block here to avoid the chance of this
+# script being run again (and the current backup deleted / a new backup created) while the snapshot is taking place.
+# The snapshot must also be done while the volume is unmounted to ensure data integrity.
+while true; do + sleep 60 + snapshot_data=$(aws --region "$aws_region" ec2 describe-snapshots --snapshot-ids "$snapshot_id" || true) + if [ "$(echo "$snapshot_data" | jq -r '.Snapshots[0].State')" = "completed" ]; then + break + fi +done + +if [ -n "$MONGO_BACKUP_PRUNE_OLDER_THAN_DATE" ]; then + # Prune old snapshots + old_snapshot_data="$(aws --region "$aws_region" ec2 describe-snapshots --filters "Name=description,Values=$MONGO_BACKUP_SNAPSHOT_DESC")" + lines_="$(echo "$old_snapshot_data" | jq -r ".Snapshots | map(\"\(.SnapshotId) \(.StartTime)\") | .[]")" + earliest_date="$(date -d "$MONGO_BACKUP_PRUNE_OLDER_THAN_DATE" "+%s")" + while read -r line; do + # each $line looks like: "snap-0123456789DEADBEEF 2019-11-01T00:15:12.492Z" + snapshot_id=$(echo "$line" | cut -f1 -d' ') + timestamp="$(date -d "$(echo "$line" | cut -f2 -d' ')" "+%s")" + if [ "$timestamp" -lt "$earliest_date" ]; then + # this snapshot_id is older than we want to keep around, so delete it + aws --region "$aws_region" ec2 delete-snapshot --snapshot-id "$snapshot_id" + fi + done <<< "$lines_" +fi + +# ping the snitch url if available +if [ -n "$MONGO_BACKUP_SNITCH_URL" ]; then + curl "$MONGO_BACKUP_SNITCH_URL" +fi + +echo "End at $(date)" diff --git a/playbooks/roles/mongo_3_2/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_3_2/templates/log-mongo-serverStatus.sh.j2 new file mode 100644 index 00000000000..04649d55ad1 --- /dev/null +++ b/playbooks/roles/mongo_3_2/templates/log-mongo-serverStatus.sh.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_3_2/templates/mongod.conf.j2 b/playbooks/roles/mongo_3_2/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_3_2/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. 
+ dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_3_4/defaults/main.yml b/playbooks/roles/mongo_3_4/defaults/main.yml new file mode 100644 index 00000000000..b5b247a3405 --- /dev/null +++ b/playbooks/roles/mongo_3_4/defaults/main.yml @@ -0,0 +1,111 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "3.4" +MONGO_VERSION_PATCH: "24" +PYMONGO_VERSION: "3.4.0" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be overridden +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'password' +MONGO_USERS: + - user: cs_comments_service + password: password + database: cs_comments_service + roles: readWrite + - user: edxapp + password: password + database: edxapp + roles: readWrite + +# This default setting is approriate for a single machine installation +# This will need to be overridden for setups where mongo is on its own server +# and/or you are configuring mongo replication. If the override value is +# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a +# specific IP. +MONGO_BIND_IP: 127.0.0.1 + +MONGO_REPL_SET: "rs0" +MONGO_AUTH: true + +MONGO_CLUSTER_KEY: "CHANGEME" + +# Cluster member configuration +# Fed directly into mongodb_replica_set module +MONGO_RS_CONFIG: + _id: '{{ MONGO_REPL_SET }}' + members: + - host: '127.0.0.1' + +# Storage engine options in 3.2: "mmapv1" or "wiredTiger" +# 3.2 and 3.4 default to wiredTiger +MONGO_STORAGE_ENGINE: "wiredTiger" + +# List of dictionaries as described in the mount_ebs role's default +# for the volumes. +# Useful if you want to store your mongo data and/or journal on separate +# disks from the root volume. By default, they will end up mongo_data_dir +# on the root disk. +MONGO_VOLUMES: [] + +# WiredTiger takes a number of optional configuration settings +# which can be defined as a yaml structure in your secure configuration. 
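A sketch of such an override (the value is illustrative, not a recommendation; the nested option names mirror mongod's own storage.wiredTiger settings, and the dict is rendered verbatim into the storage section of mongod.conf by to_nice_yaml):

    MONGO_STORAGE_ENGINE_OPTIONS:
      wiredTiger:
        engineConfig:
          cacheSizeGB: 4   # illustrative value; size to the instance's RAM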
+MONGO_STORAGE_ENGINE_OPTIONS: !!null + +mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" +mongo_dbpath: "{{ mongo_data_dir }}/mongodb" + +# In environments that do not require durability (devstack / Jenkins) +# you can disable the journal to reduce disk usage +mongo_enable_journal: true + +MONGO_LOG_SERVERSTATUS: true + +# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump +# to backup the database to an ebs volume at a period set by mongo_backup_cron. +# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your +# setup. +MONGO_BACKUP_ENABLED: false +MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups +MONGO_BACKUP_EBS_VOLUME_DEVICE: "" +MONGO_BACKUP_EBS_VOLUME_ID: "" +MONGO_BACKUP_AUTH_DATABASE: "" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days" +MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup. +MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup" +MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup" +mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh" +mongo_backup_cron: + minute: '12' + hour: '*/12' + day: '*' + month: '*' + weekday: '*' + +# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not +# manually override. +is_backup_node: false diff --git a/playbooks/roles/mongo_3_4/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_3_4/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_3_4/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_3_4/meta/main.yml b/playbooks/roles/mongo_3_4/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_3_4/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_3_4/tasks/main.yml b/playbooks/roles/mongo_3_4/tasks/main.yml new file mode 100644 index 00000000000..10dd2484cdf --- /dev/null +++ b/playbooks/roles/mongo_3_4/tasks/main.yml @@ -0,0 +1,404 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + 
version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. +- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. 
+- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. +- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. 
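The application accounts created from MONGO_USERS further below follow the same shape as the role defaults; an override sketch (the passwords, and the secure-var names holding them, are hypothetical):

    MONGO_USERS:
      - user: edxapp
        password: "{{ EDXAPP_MONGO_PASSWORD }}"   # hypothetical secure var
        database: edxapp
        roles: readWrite
      - user: cs_comments_service
        password: "{{ FORUM_MONGO_PASSWORD }}"    # hypothetical secure var
        database: cs_comments_service
        roles: readWrite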
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set + set_fact: + skip_replica_set: false + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: re-copy configuration template with replica set enabled + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: restart mongo service + service: + name: mongod + state: restarted + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: configure replica set + mongodb_replica_set: + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + rs_config: "{{ MONGO_RS_CONFIG }}" + run_once: true + register: replset_status + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + - "manage:db-replication-configuration" + +# During initial replica set configuration, it can take a few seconds to vote +# a primary and for all members to reflect that status. During that window, +# use creation or other writes can fail. The best wait/check seems to be repeatedly +# checking the replica set status until we see a PRIMARY in the results. +- name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + when: mongo_configure_replica_set + retries: 5 + delay: 2 + run_once: true + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + +- name: create mongodb users in a replica set + mongodb_user: + database: "{{ item.database }}" + login_database: 'admin' + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + replica_set: "{{ MONGO_REPL_SET }}" + with_items: "{{ MONGO_USERS }}" + run_once: true + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-users" + - "manage:db-replication" + +- name: ensure mongo starts at boot time + service: + name: mongod + enabled: yes + tags: + - "manage" + - "manage:start" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/mongo_3_4/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_3_4/templates/log-mongo-serverStatus.sh.j2 new file mode 100644 index 00000000000..04649d55ad1 --- /dev/null +++ b/playbooks/roles/mongo_3_4/templates/log-mongo-serverStatus.sh.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's 
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_3_4/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_4/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_3_4/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_3_4/templates/mongod.conf.j2 b/playbooks/roles/mongo_3_4/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_3_4/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_3_6/defaults/main.yml b/playbooks/roles/mongo_3_6/defaults/main.yml new file mode 100644 index 00000000000..802cdcc6ed0 --- /dev/null +++ b/playbooks/roles/mongo_3_6/defaults/main.yml @@ -0,0 +1,112 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "3.6" +MONGO_VERSION_PATCH: "17" +PYMONGO_VERSION: "3.6.1" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" +MONGODB_REPO_XENIAL: "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ 
MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be overridden +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'password' +MONGO_USERS: + - user: cs_comments_service + password: password + database: cs_comments_service + roles: readWrite + - user: edxapp + password: password + database: edxapp + roles: readWrite + +# This default setting is approriate for a single machine installation +# This will need to be overridden for setups where mongo is on its own server +# and/or you are configuring mongo replication. If the override value is +# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a +# specific IP. +MONGO_BIND_IP: 127.0.0.1 + +MONGO_REPL_SET: "rs0" +MONGO_AUTH: true + +MONGO_CLUSTER_KEY: "CHANGEME" + +# Cluster member configuration +# Fed directly into mongodb_replica_set module +MONGO_RS_CONFIG: + _id: '{{ MONGO_REPL_SET }}' + members: + - host: '127.0.0.1' + +# Storage engine options in 3.2: "mmapv1" or "wiredTiger" +# 3.2 and 3.4 default to wiredTiger +MONGO_STORAGE_ENGINE: "wiredTiger" + +# List of dictionaries as described in the mount_ebs role's default +# for the volumes. +# Useful if you want to store your mongo data and/or journal on separate +# disks from the root volume. By default, they will end up mongo_data_dir +# on the root disk. +MONGO_VOLUMES: [] + +# WiredTiger takes a number of optional configuration settings +# which can be defined as a yaml structure in your secure configuration. +MONGO_STORAGE_ENGINE_OPTIONS: !!null + +mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" +mongo_dbpath: "{{ mongo_data_dir }}/mongodb" + +# In environments that do not require durability (devstack / Jenkins) +# you can disable the journal to reduce disk usage +mongo_enable_journal: true + +MONGO_LOG_SERVERSTATUS: true + +# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump +# to backup the database to an ebs volume at a period set by mongo_backup_cron. +# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your +# setup. +MONGO_BACKUP_ENABLED: false +MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups +MONGO_BACKUP_EBS_VOLUME_DEVICE: "" +MONGO_BACKUP_EBS_VOLUME_ID: "" +MONGO_BACKUP_AUTH_DATABASE: "" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days" +MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup. +MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup" +MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup" +mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh" +mongo_backup_cron: + minute: '12' + hour: '*/12' + day: '*' + month: '*' + weekday: '*' + +# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not +# manually override. 
+is_backup_node: false diff --git a/playbooks/roles/mongo_3_6/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_3_6/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_3_6/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_3_6/meta/main.yml b/playbooks/roles/mongo_3_6/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_3_6/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_3_6/tasks/main.yml b/playbooks/roles/mongo_3_6/tasks/main.yml new file mode 100644 index 00000000000..5a1b83db774 --- /dev/null +++ b/playbooks/roles/mongo_3_6/tasks/main.yml @@ -0,0 +1,436 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +# add Bionic source list to install libssl1.0.0 on Focal +# so mongo3.6 installation does not fail with dependencies +- name: add source list to install libssl1.0.0 + apt_repository: + repo: "deb http://security.ubuntu.com/ubuntu bionic-security main" + state: present + tags: + - "install" + - "install:app-requirements" + when: ansible_distribution_release == 'focal' + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + when: ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal' + +# mongo 3.6 does not have any source list for Bionic and Focal +# use Xenial repo source list to install mongo 3.6 +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO_XENIAL }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + when: ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + +- name: install mongo server 
and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: remove source list used to install libssl1.0.0 + file: path=/etc/apt/sources.list.d/security_ubuntu_com_ubuntu.list state=absent + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + when: ansible_distribution_release == 'focal' + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. +- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. 
+# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. +- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. 
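The backup cadence installed by the cron task above comes from mongo_backup_cron; a sketch of an override that runs the dump every six hours on the hour instead (field names as in the role defaults):

    mongo_backup_cron:
      minute: '0'
      hour: '*/6'
      day: '*'
      month: '*'
      weekday: '*'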
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set + set_fact: + skip_replica_set: false + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: re-copy configuration template with replica set enabled + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: restart mongo service + service: + name: mongod + state: restarted + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: configure replica set + mongodb_replica_set: + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + rs_config: "{{ MONGO_RS_CONFIG }}" + run_once: true + register: replset_status + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + - "manage:db-replication-configuration" + +# During initial replica set configuration, it can take a few seconds to vote +# a primary and for all members to reflect that status. During that window, +# use creation or other writes can fail. The best wait/check seems to be repeatedly +# checking the replica set status until we see a PRIMARY in the results. +- name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + when: mongo_configure_replica_set + retries: 5 + delay: 2 + run_once: true + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + +- name: create mongodb users in a replica set + mongodb_user: + database: "{{ item.database }}" + login_database: 'admin' + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + replica_set: "{{ MONGO_REPL_SET }}" + with_items: "{{ MONGO_USERS }}" + run_once: true + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-users" + - "manage:db-replication" + +- name: ensure mongo starts at boot time + service: + name: mongod + enabled: yes + tags: + - "manage" + - "manage:start" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/mongo_3_6/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_3_6/templates/log-mongo-serverStatus.sh.j2 new file mode 100644 index 00000000000..04649d55ad1 --- /dev/null +++ b/playbooks/roles/mongo_3_6/templates/log-mongo-serverStatus.sh.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's 
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_3_6/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_6/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_3_6/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_3_6/templates/mongod.conf.j2 b/playbooks/roles/mongo_3_6/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_3_6/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_4_0/defaults/main.yml b/playbooks/roles/mongo_4_0/defaults/main.yml new file mode 100644 index 00000000000..591e5826591 --- /dev/null +++ b/playbooks/roles/mongo_4_0/defaults/main.yml @@ -0,0 +1,113 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "4.0" +MONGO_VERSION_PATCH: "22" +PYMONGO_VERSION: "3.11.2" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" +MONGODB_REPO_BIONIC: "deb http://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ 
MONGO_VERSION }}"
+  - "mongodb-org-tools={{ MONGO_VERSION }}"
+
+
+
+mongo_configure_replica_set: true
+
+# Vars Meant to be overridden
+MONGO_ADMIN_USER: 'admin'
+MONGO_ADMIN_PASSWORD: 'password'
+MONGO_USERS:
+  - user: cs_comments_service
+    password: password
+    database: cs_comments_service
+    roles: readWrite
+  - user: edxapp
+    password: password
+    database: edxapp
+    roles: readWrite
+
+# This default setting is appropriate for a single machine installation
+# This will need to be overridden for setups where mongo is on its own server
+# and/or you are configuring mongo replication. If the override value is
+# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a
+# specific IP.
+MONGO_BIND_IP: 127.0.0.1
+
+MONGO_REPL_SET: "rs0"
+MONGO_AUTH: true
+
+MONGO_CLUSTER_KEY: "CHANGEME"
+
+# Cluster member configuration
+# Fed directly into mongodb_replica_set module
+MONGO_RS_CONFIG:
+  _id: '{{ MONGO_REPL_SET }}'
+  protocolVersion: 1
+  members:
+    - host: '127.0.0.1'
+
+# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
+# 3.2 and 3.4 default to wiredTiger
+MONGO_STORAGE_ENGINE: "wiredTiger"
+
+# List of dictionaries as described in the mount_ebs role's default
+# for the volumes.
+# Useful if you want to store your mongo data and/or journal on separate
+# disks from the root volume. By default, they will end up in mongo_data_dir
+# on the root disk.
+MONGO_VOLUMES: []
+
+# WiredTiger takes a number of optional configuration settings
+# which can be defined as a yaml structure in your secure configuration.
+MONGO_STORAGE_ENGINE_OPTIONS: !!null
+
+mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
+mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
+
+# In environments that do not require durability (devstack / Jenkins)
+# you can disable the journal to reduce disk usage
+mongo_enable_journal: true
+
+MONGO_LOG_SERVERSTATUS: true
+
+# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump
+# to back up the database to an ebs volume on the schedule set by mongo_backup_cron.
+# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your
+# setup.
+MONGO_BACKUP_ENABLED: false
+MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups
+MONGO_BACKUP_EBS_VOLUME_DEVICE: ""
+MONGO_BACKUP_EBS_VOLUME_ID: ""
+MONGO_BACKUP_AUTH_DATABASE: ""
+MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days"
+MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup.
+MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup"
+MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup"
+mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh"
+mongo_backup_cron:
+  minute: '12'
+  hour: '*/12'
+  day: '*'
+  month: '*'
+  weekday: '*'
+
+# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not
+# manually override.
+is_backup_node: false
diff --git a/playbooks/roles/mongo_4_0/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_4_0/files/etc/systemd/system/disable-transparent-hugepages.service
new file mode 100644
index 00000000000..282c9e122c3
--- /dev/null
+++ b/playbooks/roles/mongo_4_0/files/etc/systemd/system/disable-transparent-hugepages.service
@@ -0,0 +1,11 @@
+[Unit]
+Description="Disable Transparent Hugepage before MongoDB boots"
+Before=mongod.service
+
+[Service]
+Type=oneshot
+ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled'
+ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag'
+
+[Install]
+RequiredBy=mongod.service
diff --git a/playbooks/roles/mongo_4_0/meta/main.yml b/playbooks/roles/mongo_4_0/meta/main.yml
new file mode 100644
index 00000000000..d7223454526
--- /dev/null
+++ b/playbooks/roles/mongo_4_0/meta/main.yml
@@ -0,0 +1,5 @@
+---
+dependencies:
+  - common
+  - role: mount_ebs
+    volumes: "{{ MONGO_VOLUMES }}"
diff --git a/playbooks/roles/mongo_4_0/tasks/main.yml b/playbooks/roles/mongo_4_0/tasks/main.yml
new file mode 100644
index 00000000000..ab41799ba60
--- /dev/null
+++ b/playbooks/roles/mongo_4_0/tasks/main.yml
@@ -0,0 +1,436 @@
+---
+- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
+  copy:
+    src: etc/systemd/system/disable-transparent-hugepages.service
+    dest: "/etc/systemd/system/disable-transparent-hugepages.service"
+    owner: root
+    group: root
+    mode: 0644
+  tags:
+    - "hugepages"
+    - "install"
+    - "install:configuration"
+
+- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
+  service:
+    name: disable-transparent-hugepages
+    enabled: yes
+    state: started
+  tags:
+    - "hugepages"
+    - "manage"
+    - "manage:start"
+
+- name: install python pymongo for mongo_user ansible module
+  pip:
+    name: pymongo
+    state: present
+    version: "{{ PYMONGO_VERSION }}"
+    extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
+  tags:
+    - "install"
+    - "install:app-requirements"
+
+# add Bionic source list to install libssl1.0.0 on Focal
+# so mongo 4.0 installation does not fail with dependencies
+- name: add source list to install libssl1.0.0
+  apt_repository:
+    repo: "deb http://security.ubuntu.com/ubuntu bionic-security main"
+    state: present
+  tags:
+    - "install"
+    - "install:app-requirements"
+  when: ansible_distribution_release == 'focal'
+
+- name: add the mongodb signing key
+  apt_key:
+    url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc"
+    state: present
+  retries: 3
+  register: add_mongo_signing_key
+  tags:
+    - "install"
+    - "install:app-requirements"
+  until: add_mongo_signing_key is succeeded
+
+- name: add the mongodb repo to the sources list
+  apt_repository:
+    repo: "{{ MONGODB_REPO }}"
+    state: present
+  tags:
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+  when: ansible_distribution_release != 'focal'
+
+# mongo 4.0 does not have any source list for Focal
+# use Bionic repo source list to install mongo 4.0
+- name: add the mongodb repo to the sources list
+  apt_repository:
+    repo: "{{ MONGODB_REPO_BIONIC }}"
+    state: present
+  tags:
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+  when: ansible_distribution_release == 'focal'
+
+- name: install mongo server and recommends
+  apt:
+    pkg: "{{ item }}"
+    state: present
+    install_recommends: yes
+    force: yes
+
update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: remove source list used to install libssl1.0.0 + file: path=/etc/apt/sources.list.d/security_ubuntu_com_ubuntu.list state=absent + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + when: ansible_distribution_release == 'focal' + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. +- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
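+#
+# For example, with hypothetical overrides of
+#
+#   MONGO_BACKUP_ENABLED: true
+#   MONGO_BACKUP_NODE: "10.0.1.12"
+#
+# only the host whose default IPv4 address (or, failing that, first reported
+# IPv4 address) equals 10.0.1.12 sets is_backup_node to true; every other
+# cluster member keeps the role default of false.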
+- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. 
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+    - "manage:db-replication-configuration"
+
+# During initial replica set configuration, it can take a few seconds to elect
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary
+  mongodb_rs_status:
+    host: "{{ ansible_lo['ipv4']['address'] }}"
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+  register: status
+  until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
+  when: mongo_configure_replica_set
+  retries: 5
+  delay: 2
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+
+- name: create mongodb users in a replica set
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_database: 'admin'
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ MONGO_REPL_SET }}"
+  with_items: "{{ MONGO_USERS }}"
+  run_once: true
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-users"
+    - "manage:db-replication"
+
+- name: ensure mongo starts at boot time
+  service:
+    name: mongod
+    enabled: yes
+  tags:
+    - "manage"
+    - "manage:start"
+
+- name: add serverStatus logging script
+  template:
+    src: "log-mongo-serverStatus.sh.j2"
+    dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+    mode: 0700
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: add serverStatus logging script to cron
+  cron:
+    name: mongostat logging job
+    minute: "*/3"
+    job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
+  become: yes
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
diff --git a/playbooks/roles/mongo_4_0/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_4_0/templates/log-mongo-serverStatus.sh.j2
new file mode 100644
index 00000000000..04649d55ad1
--- /dev/null
+++ b/playbooks/roles/mongo_4_0/templates/log-mongo-serverStatus.sh.j2
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_4_0/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_4_0/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_4_0/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_4_0/templates/mongod.conf.j2 b/playbooks/roles/mongo_4_0/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_4_0/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_4_2/defaults/main.yml b/playbooks/roles/mongo_4_2/defaults/main.yml new file mode 100644 index 00000000000..09da0fcd5a3 --- /dev/null +++ b/playbooks/roles/mongo_4_2/defaults/main.yml @@ -0,0 +1,112 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "4.2" +MONGO_VERSION_PATCH: "14" +PYMONGO_VERSION: "3.11.2" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" +MONGODB_REPO_BIONIC: "deb http://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ 
MONGO_VERSION }}"
+  - "mongodb-org-tools={{ MONGO_VERSION }}"
+
+
+
+mongo_configure_replica_set: true
+
+# Vars Meant to be overridden
+MONGO_ADMIN_USER: 'admin'
+MONGO_ADMIN_PASSWORD: 'password'
+MONGO_USERS:
+  - user: cs_comments_service
+    password: password
+    database: cs_comments_service
+    roles: readWrite
+  - user: edxapp
+    password: password
+    database: edxapp
+    roles: readWrite
+
+# This default setting is appropriate for a single machine installation
+# This will need to be overridden for setups where mongo is on its own server
+# and/or you are configuring mongo replication. If the override value is
+# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a
+# specific IP.
+MONGO_BIND_IP: 127.0.0.1
+
+MONGO_REPL_SET: "rs0"
+MONGO_AUTH: true
+
+MONGO_CLUSTER_KEY: "CHANGEME"
+
+# Cluster member configuration
+# Fed directly into mongodb_replica_set module
+MONGO_RS_CONFIG:
+  _id: '{{ MONGO_REPL_SET }}'
+  members:
+    - host: '127.0.0.1'
+
+# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
+# 3.2 and 3.4 default to wiredTiger
+MONGO_STORAGE_ENGINE: "wiredTiger"
+
+# List of dictionaries as described in the mount_ebs role's default
+# for the volumes.
+# Useful if you want to store your mongo data and/or journal on separate
+# disks from the root volume. By default, they will end up in mongo_data_dir
+# on the root disk.
+MONGO_VOLUMES: []
+
+# WiredTiger takes a number of optional configuration settings
+# which can be defined as a yaml structure in your secure configuration.
+MONGO_STORAGE_ENGINE_OPTIONS: !!null
+
+mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
+mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
+
+# In environments that do not require durability (devstack / Jenkins)
+# you can disable the journal to reduce disk usage
+mongo_enable_journal: true
+
+MONGO_LOG_SERVERSTATUS: true
+
+# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump
+# to back up the database to an ebs volume on the schedule set by mongo_backup_cron.
+# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your
+# setup.
+MONGO_BACKUP_ENABLED: false
+MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups
+MONGO_BACKUP_EBS_VOLUME_DEVICE: ""
+MONGO_BACKUP_EBS_VOLUME_ID: ""
+MONGO_BACKUP_AUTH_DATABASE: ""
+MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days"
+MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup.
+MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup"
+MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup"
+mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh"
+mongo_backup_cron:
+  minute: '12'
+  hour: '*/12'
+  day: '*'
+  month: '*'
+  weekday: '*'
+
+# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not
+# manually override.
+is_backup_node: false
diff --git a/playbooks/roles/mongo_4_2/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_4_2/files/etc/systemd/system/disable-transparent-hugepages.service
new file mode 100644
index 00000000000..282c9e122c3
--- /dev/null
+++ b/playbooks/roles/mongo_4_2/files/etc/systemd/system/disable-transparent-hugepages.service
@@ -0,0 +1,11 @@
+[Unit]
+Description="Disable Transparent Hugepage before MongoDB boots"
+Before=mongod.service
+
+[Service]
+Type=oneshot
+ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled'
+ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag'
+
+[Install]
+RequiredBy=mongod.service
diff --git a/playbooks/roles/mongo_4_2/meta/main.yml b/playbooks/roles/mongo_4_2/meta/main.yml
new file mode 100644
index 00000000000..d7223454526
--- /dev/null
+++ b/playbooks/roles/mongo_4_2/meta/main.yml
@@ -0,0 +1,5 @@
+---
+dependencies:
+  - common
+  - role: mount_ebs
+    volumes: "{{ MONGO_VOLUMES }}"
diff --git a/playbooks/roles/mongo_4_2/tasks/main.yml b/playbooks/roles/mongo_4_2/tasks/main.yml
new file mode 100644
index 00000000000..a4ede61efb7
--- /dev/null
+++ b/playbooks/roles/mongo_4_2/tasks/main.yml
@@ -0,0 +1,417 @@
+---
+- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
+  copy:
+    src: etc/systemd/system/disable-transparent-hugepages.service
+    dest: "/etc/systemd/system/disable-transparent-hugepages.service"
+    owner: root
+    group: root
+    mode: 0644
+  tags:
+    - "hugepages"
+    - "install"
+    - "install:configuration"
+
+- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
+  service:
+    name: disable-transparent-hugepages
+    enabled: yes
+    state: started
+  tags:
+    - "hugepages"
+    - "manage"
+    - "manage:start"
+
+- name: install python pymongo for mongo_user ansible module
+  pip:
+    name: pymongo
+    state: present
+    version: "{{ PYMONGO_VERSION }}"
+    extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
+  tags:
+    - "install"
+    - "install:app-requirements"
+
+- name: add the mongodb signing key
+  apt_key:
+    url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc"
+    state: present
+  retries: 3
+  register: add_mongo_signing_key
+  tags:
+    - "install"
+    - "install:app-requirements"
+  until: add_mongo_signing_key is succeeded
+
+- name: add the mongodb repo to the sources list
+  apt_repository:
+    repo: "{{ MONGODB_REPO }}"
+    state: present
+  tags:
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+  when: ansible_distribution_release != 'focal'
+
+# mongo 4.2 does not have any source list for Focal
+# use Bionic repo source list to install mongo 4.2
+- name: add the mongodb repo to the sources list
+  apt_repository:
+    repo: "{{ MONGODB_REPO_BIONIC }}"
+    state: present
+  tags:
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+  when: ansible_distribution_release == 'focal'
+
+- name: install mongo server and recommends
+  apt:
+    pkg: "{{ item }}"
+    state: present
+    install_recommends: yes
+    force: yes
+    update_cache: yes
+  register: install_mongo_package
+  with_items: "{{ mongodb_debian_pkgs }}"
+  tags:
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+
+- name: create mongo dirs
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+  with_items:
+    - "{{ mongo_data_dir }}"
+    - "{{ mongo_dbpath }}"
+    - "{{
mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. +- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
+- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. 
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+    - "manage:db-replication-configuration"
+
+# During initial replica set configuration, it can take a few seconds to elect
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary
+  mongodb_rs_status:
+    host: "{{ ansible_lo['ipv4']['address'] }}"
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+  register: status
+  until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
+  when: mongo_configure_replica_set
+  retries: 5
+  delay: 2
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+
+- name: create mongodb users in a replica set
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_database: 'admin'
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ MONGO_REPL_SET }}"
+  with_items: "{{ MONGO_USERS }}"
+  run_once: true
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-users"
+    - "manage:db-replication"
+
+- name: ensure mongo starts at boot time
+  service:
+    name: mongod
+    enabled: yes
+  tags:
+    - "manage"
+    - "manage:start"
+
+- name: add serverStatus logging script
+  template:
+    src: "log-mongo-serverStatus.sh.j2"
+    dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+    mode: 0700
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: add serverStatus logging script to cron
+  cron:
+    name: mongostat logging job
+    minute: "*/3"
+    job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
+  become: yes
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
diff --git a/playbooks/roles/mongo_4_2/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_4_2/templates/log-mongo-serverStatus.sh.j2
new file mode 100644
index 00000000000..04649d55ad1
--- /dev/null
+++ b/playbooks/roles/mongo_4_2/templates/log-mongo-serverStatus.sh.j2
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_4_2/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_4_2/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_4_2/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_4_2/templates/mongod.conf.j2 b/playbooks/roles/mongo_4_2/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_4_2/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_4_4/defaults/main.yml b/playbooks/roles/mongo_4_4/defaults/main.yml new file mode 100644 index 00000000000..a154a14c1fe --- /dev/null +++ b/playbooks/roles/mongo_4_4/defaults/main.yml @@ -0,0 +1,111 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "4.4" +MONGO_VERSION_PATCH: "28" +PYMONGO_VERSION: "3.11.2" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be 
overridden
+MONGO_ADMIN_USER: 'admin'
+MONGO_ADMIN_PASSWORD: 'password'
+MONGO_USERS:
+  - user: cs_comments_service
+    password: password
+    database: cs_comments_service
+    roles: readWrite
+  - user: edxapp
+    password: password
+    database: edxapp
+    roles: readWrite
+
+# This default setting is appropriate for a single machine installation
+# This will need to be overridden for setups where mongo is on its own server
+# and/or you are configuring mongo replication. If the override value is
+# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a
+# specific IP.
+MONGO_BIND_IP: 127.0.0.1
+
+MONGO_REPL_SET: "rs0"
+MONGO_AUTH: true
+
+MONGO_CLUSTER_KEY: "CHANGEME"
+
+# Cluster member configuration
+# Fed directly into mongodb_replica_set module
+MONGO_RS_CONFIG:
+  _id: '{{ MONGO_REPL_SET }}'
+  members:
+    - host: '127.0.0.1'
+
+# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
+# 3.2 and 3.4 default to wiredTiger
+MONGO_STORAGE_ENGINE: "wiredTiger"
+
+# List of dictionaries as described in the mount_ebs role's default
+# for the volumes.
+# Useful if you want to store your mongo data and/or journal on separate
+# disks from the root volume. By default, they will end up in mongo_data_dir
+# on the root disk.
+MONGO_VOLUMES: []
+
+# WiredTiger takes a number of optional configuration settings
+# which can be defined as a yaml structure in your secure configuration.
+MONGO_STORAGE_ENGINE_OPTIONS: !!null
+
+mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
+mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
+
+# In environments that do not require durability (devstack / Jenkins)
+# you can disable the journal to reduce disk usage
+mongo_enable_journal: true
+
+MONGO_LOG_SERVERSTATUS: true
+
+# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump
+# to back up the database to an ebs volume on the schedule set by mongo_backup_cron.
+# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your
+# setup.
+MONGO_BACKUP_ENABLED: false
+MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups
+MONGO_BACKUP_EBS_VOLUME_DEVICE: ""
+MONGO_BACKUP_EBS_VOLUME_ID: ""
+MONGO_BACKUP_AUTH_DATABASE: ""
+MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days"
+MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup.
+MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup"
+MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup"
+mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh"
+mongo_backup_cron:
+  minute: '12'
+  hour: '*/12'
+  day: '*'
+  month: '*'
+  weekday: '*'
+
+# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not
+# manually override.
+is_backup_node: false diff --git a/playbooks/roles/mongo_4_4/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_4_4/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_4_4/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_4_4/meta/main.yml b/playbooks/roles/mongo_4_4/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_4_4/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_4_4/tasks/main.yml b/playbooks/roles/mongo_4_4/tasks/main.yml new file mode 100644 index 00000000000..10dd2484cdf --- /dev/null +++ b/playbooks/roles/mongo_4_4/tasks/main.yml @@ -0,0 +1,404 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. 
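+#
+# Conceptually, this pair of probes amounts to asking the server for
+# rs.status() twice from the mongo shell, e.g. (sketch only, placeholder
+# credentials, not part of the play):
+#
+#   mongo -u admin -p 'xxx' --authenticationDatabase admin --eval 'rs.status()'
+#   mongo --eval 'rs.status()'    # falls back to the localhost exemption
+#
+# If neither probe reports an existing replica set, the tasks below treat
+# this as a fresh box and initialize one.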
+- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
+- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. 
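+#
+# For context, the localhost exemption relied on above means a shell on the
+# box itself can create the very first admin user without credentials,
+# roughly (sketch only, values are placeholders):
+#
+#   mongo admin --eval 'db.createUser({user: "admin", pwd: "xxx", roles: ["root"]})'
+#
+# Once that first user exists, unauthenticated localhost access is closed
+# off, which is why the superuser task runs exactly once and before the
+# replication/security stanza is rendered into mongod.conf.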
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+    - "manage:db-replication-configuration"
+
+# During initial replica set configuration, it can take a few seconds to elect
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary
+  mongodb_rs_status:
+    host: "{{ ansible_lo['ipv4']['address'] }}"
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+  register: status
+  until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
+  when: mongo_configure_replica_set
+  retries: 5
+  delay: 2
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+
+- name: create mongodb users in a replica set
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_database: 'admin'
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ MONGO_REPL_SET }}"
+  with_items: "{{ MONGO_USERS }}"
+  run_once: true
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-users"
+    - "manage:db-replication"
+
+- name: ensure mongo starts at boot time
+  service:
+    name: mongod
+    enabled: yes
+  tags:
+    - "manage"
+    - "manage:start"
+
+- name: add serverStatus logging script
+  template:
+    src: "log-mongo-serverStatus.sh.j2"
+    dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+    mode: 0700
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: add serverStatus logging script to cron
+  cron:
+    name: mongostat logging job
+    minute: "*/3"
+    job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
+  become: yes
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
diff --git a/playbooks/roles/mongo_4_4/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_4_4/templates/log-mongo-serverStatus.sh.j2
new file mode 100644
index 00000000000..04649d55ad1
--- /dev/null
+++ b/playbooks/roles/mongo_4_4/templates/log-mongo-serverStatus.sh.j2
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_4_4/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_4_4/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_4_4/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_4_4/templates/mongod.conf.j2 b/playbooks/roles/mongo_4_4/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_4_4/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_5_0/defaults/main.yml b/playbooks/roles/mongo_5_0/defaults/main.yml new file mode 100644 index 00000000000..531115b54f0 --- /dev/null +++ b/playbooks/roles/mongo_5_0/defaults/main.yml @@ -0,0 +1,111 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "5.0" +MONGO_VERSION_PATCH: "24" +PYMONGO_VERSION: "3.12.3" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be 
overridden
+MONGO_ADMIN_USER: 'admin'
+MONGO_ADMIN_PASSWORD: 'password'
+MONGO_USERS:
+  - user: cs_comments_service
+    password: password
+    database: cs_comments_service
+    roles: readWrite
+  - user: edxapp
+    password: password
+    database: edxapp
+    roles: readWrite
+
+# This default setting is appropriate for a single-machine installation.
+# It will need to be overridden for setups where mongo is on its own server
+# and/or you are configuring mongo replication. If the override value is
+# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a
+# specific IP.
+MONGO_BIND_IP: 127.0.0.1
+
+MONGO_REPL_SET: "rs0"
+MONGO_AUTH: true
+
+MONGO_CLUSTER_KEY: "CHANGEME"
+
+# Cluster member configuration
+# Fed directly into the mongodb_replica_set module
+MONGO_RS_CONFIG:
+  _id: '{{ MONGO_REPL_SET }}'
+  members:
+    - host: '127.0.0.1'
+
+# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
+# 3.2 and 3.4 default to wiredTiger
+MONGO_STORAGE_ENGINE: "wiredTiger"
+
+# List of dictionaries as described in the mount_ebs role's defaults
+# for the volumes.
+# Useful if you want to store your mongo data and/or journal on separate
+# disks from the root volume. By default, they will end up in mongo_data_dir
+# on the root disk.
+MONGO_VOLUMES: []
+
+# WiredTiger takes a number of optional configuration settings
+# which can be defined as a yaml structure in your secure configuration.
+MONGO_STORAGE_ENGINE_OPTIONS: !!null
+
+mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
+mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
+
+# In environments that do not require durability (devstack / Jenkins)
+# you can disable the journal to reduce disk usage
+mongo_enable_journal: true
+
+MONGO_LOG_SERVERSTATUS: true
+
+# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump
+# to back up the database to an EBS volume on the schedule set by mongo_backup_cron.
+# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your
+# setup.
+MONGO_BACKUP_ENABLED: false
+MONGO_BACKUP_NODE: ""  # note: most likely the IP address of the instance on which to perform the backups
+MONGO_BACKUP_EBS_VOLUME_DEVICE: ""
+MONGO_BACKUP_EBS_VOLUME_ID: ""
+MONGO_BACKUP_AUTH_DATABASE: ""
+MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: ""  # passed to `date -d`; should be a relative date like "-30days"
+MONGO_BACKUP_SNITCH_URL: ""  # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup.
+MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup"
+MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup"
+mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh"
+mongo_backup_cron:
+  minute: '12'
+  hour: '*/12'
+  day: '*'
+  month: '*'
+  weekday: '*'
+
+# Internal variable set to true dynamically if backups are enabled and the playbook is running on MONGO_BACKUP_NODE.
+# Do not manually override.
+is_backup_node: false diff --git a/playbooks/roles/mongo_5_0/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_5_0/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_5_0/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_5_0/meta/main.yml b/playbooks/roles/mongo_5_0/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_5_0/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_5_0/tasks/main.yml b/playbooks/roles/mongo_5_0/tasks/main.yml new file mode 100644 index 00000000000..10dd2484cdf --- /dev/null +++ b/playbooks/roles/mongo_5_0/tasks/main.yml @@ -0,0 +1,404 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. 
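+# For reference, a successful lookup registers a result shaped roughly like
+# the following (a sketch inferred from how 'status' is consumed further down,
+# not the module's documented schema):
+#
+#   status:
+#     set: rs0
+#     members:
+#       - name: "127.0.0.1:27017"
+#         stateStr: PRIMARY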
+- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
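+# Example override (a sketch; the IP is hypothetical): in your secure vars,
+#
+#   MONGO_BACKUP_ENABLED: true
+#   MONGO_BACKUP_NODE: "10.0.1.12"   # default IPv4 of the designated member
+#
+# makes the fact below true on that host only.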
+- name: determine if backup tasks should run
+  set_fact:
+    is_backup_node: true
+  when: MONGO_BACKUP_ENABLED and (ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0])) == MONGO_BACKUP_NODE
+  tags:
+    - "backup:mongo"
+
+- name: install logrotate configuration
+  template:
+    src: mongo_logrotate.j2
+    dest: /etc/logrotate.d/hourly/mongo
+  tags:
+    - "backup:mongo"
+    - "install"
+    - "install:app-configuration"
+    - "logrotate"
+
+- name: install prereqs for backup script
+  apt:
+    pkg: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - jq
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+
+- name: install backup script
+  template:
+    src: backup-mongo.sh.j2
+    dest: "{{ mongo_backup_script_path }}"
+    mode: 0700
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: add mongo backup script to cron
+  cron:
+    name: mongo backup job
+    minute: "{{ mongo_backup_cron.minute | default('12') }}"
+    hour: "{{ mongo_backup_cron.hour | default('*/12') }}"
+    day: "{{ mongo_backup_cron.day | default('*') }}"
+    month: "{{ mongo_backup_cron.month | default('*') }}"
+    weekday: "{{ mongo_backup_cron.weekday | default('*') }}"
+    job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1"
+  become: yes
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: format mongo backup volume
+  filesystem:
+    dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}"
+    fstype: ext4
+    force: true
+  ignore_errors: true
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: restart mongo service if we changed our configuration or upgraded mongo
+  service:
+    name: mongod
+    state: restarted
+  when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed
+  tags:
+    - "manage"
+    - "manage:start"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  tags:
+    - "manage"
+    - "manage:start"
+    - "manage:db-replication"
+
+# We only try passwordless superuser creation when
+# we're initializing the replica set and need to use
+# the localhost exemption to create a user who will be
+# able to initialize the replica set.
+# We can only create the users on one machine, the one
+# where we will initialize the replica set. If we
+# create users on multiple hosts, then they will fail
+# to join the replica set.
+- name: create super user
+  mongodb_user:
+    name: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    database: admin
+    roles: root
+  when: initialize_replica_set
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+# Now that the localhost exemption has been used to create the superuser, we need
+# to add the replica set to our configuration. This will never happen if we detected
+# a replica set in the 'determine if there is a replica set already' task.
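+# MONGO_RS_CONFIG (fed to the mongodb_replica_set task below) defaults to a
+# single localhost member; a three-node set would be configured roughly like
+# this (a sketch with hypothetical addresses):
+#
+#   MONGO_RS_CONFIG:
+#     _id: "{{ MONGO_REPL_SET }}"
+#     members:
+#       - host: "10.0.1.10:27017"
+#       - host: "10.0.1.11:27017"
+#       - host: "10.0.1.12:27017"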
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+    - "manage:db-replication-configuration"
+
+# During initial replica set configuration, it can take a few seconds to vote
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary
+  mongodb_rs_status:
+    host: "{{ ansible_lo['ipv4']['address'] }}"
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+  register: status
+  until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
+  when: mongo_configure_replica_set
+  retries: 5
+  delay: 2
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+
+- name: create mongodb users in a replica set
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_database: 'admin'
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ MONGO_REPL_SET }}"
+  with_items: "{{ MONGO_USERS }}"
+  run_once: true
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-users"
+    - "manage:db-replication"
+
+- name: ensure mongo starts at boot time
+  service:
+    name: mongod
+    enabled: yes
+  tags:
+    - "manage"
+    - "manage:start"
+
+- name: add serverStatus logging script
+  template:
+    src: "log-mongo-serverStatus.sh.j2"
+    dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+    mode: 0700
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: add serverStatus logging script to cron
+  cron:
+    name: mongostat logging job
+    minute: "*/3"
+    job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
+  become: yes
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
diff --git a/playbooks/roles/mongo_5_0/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_5_0/templates/log-mongo-serverStatus.sh.j2
new file mode 100644
index 00000000000..04649d55ad1
--- /dev/null
+++ b/playbooks/roles/mongo_5_0/templates/log-mongo-serverStatus.sh.j2
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's 
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_5_0/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_5_0/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_5_0/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_5_0/templates/mongod.conf.j2 b/playbooks/roles/mongo_5_0/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_5_0/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_6_0/defaults/main.yml b/playbooks/roles/mongo_6_0/defaults/main.yml new file mode 100644 index 00000000000..ed802237015 --- /dev/null +++ b/playbooks/roles/mongo_6_0/defaults/main.yml @@ -0,0 +1,111 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "6.0" +MONGO_VERSION_PATCH: "15" +PYMONGO_VERSION: "3.12.3" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be 
overridden
+MONGO_ADMIN_USER: 'admin'
+MONGO_ADMIN_PASSWORD: 'password'
+MONGO_USERS:
+  - user: cs_comments_service
+    password: password
+    database: cs_comments_service
+    roles: readWrite
+  - user: edxapp
+    password: password
+    database: edxapp
+    roles: readWrite
+
+# This default setting is appropriate for a single-machine installation.
+# It will need to be overridden for setups where mongo is on its own server
+# and/or you are configuring mongo replication. If the override value is
+# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a
+# specific IP.
+MONGO_BIND_IP: 127.0.0.1
+
+MONGO_REPL_SET: "rs0"
+MONGO_AUTH: true
+
+MONGO_CLUSTER_KEY: "CHANGEME"
+
+# Cluster member configuration
+# Fed directly into the mongodb_replica_set module
+MONGO_RS_CONFIG:
+  _id: '{{ MONGO_REPL_SET }}'
+  members:
+    - host: '127.0.0.1'
+
+# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
+# 3.2 and 3.4 default to wiredTiger
+MONGO_STORAGE_ENGINE: "wiredTiger"
+
+# List of dictionaries as described in the mount_ebs role's defaults
+# for the volumes.
+# Useful if you want to store your mongo data and/or journal on separate
+# disks from the root volume. By default, they will end up in mongo_data_dir
+# on the root disk.
+MONGO_VOLUMES: []
+
+# WiredTiger takes a number of optional configuration settings
+# which can be defined as a yaml structure in your secure configuration.
+MONGO_STORAGE_ENGINE_OPTIONS: !!null
+
+mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
+mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
+
+# In environments that do not require durability (devstack / Jenkins)
+# you can disable the journal to reduce disk usage
+mongo_enable_journal: true
+
+MONGO_LOG_SERVERSTATUS: true
+
+# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump
+# to back up the database to an EBS volume on the schedule set by mongo_backup_cron.
+# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your
+# setup.
+MONGO_BACKUP_ENABLED: false
+MONGO_BACKUP_NODE: ""  # note: most likely the IP address of the instance on which to perform the backups
+MONGO_BACKUP_EBS_VOLUME_DEVICE: ""
+MONGO_BACKUP_EBS_VOLUME_ID: ""
+MONGO_BACKUP_AUTH_DATABASE: ""
+MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: ""  # passed to `date -d`; should be a relative date like "-30days"
+MONGO_BACKUP_SNITCH_URL: ""  # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup.
+MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup"
+MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup"
+mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh"
+mongo_backup_cron:
+  minute: '12'
+  hour: '*/12'
+  day: '*'
+  month: '*'
+  weekday: '*'
+
+# Internal variable set to true dynamically if backups are enabled and the playbook is running on MONGO_BACKUP_NODE.
+# Do not manually override.
+is_backup_node: false diff --git a/playbooks/roles/mongo_6_0/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_6_0/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_6_0/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_6_0/meta/main.yml b/playbooks/roles/mongo_6_0/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_6_0/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_6_0/tasks/main.yml b/playbooks/roles/mongo_6_0/tasks/main.yml new file mode 100644 index 00000000000..10dd2484cdf --- /dev/null +++ b/playbooks/roles/mongo_6_0/tasks/main.yml @@ -0,0 +1,404 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. 
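+# The two probes below combine as follows: the authed probe succeeding means a
+# secured replica set already exists; the authed probe failing while the
+# unauthed one succeeds means a replica set exists without auth; both failing
+# means no replica set, so the facts further down switch to initialization.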
+- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
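+# With the role defaults, the backup cron task below renders an entry like
+# this (a sketch; the log path assumes the usual COMMON_LOG_DIR of
+# /edx/var/log):
+#
+#   12 */12 * * * /usr/local/sbin/backup-mongo.sh >> /edx/var/log/mongo/mongo-backup.log 2>&1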
+- name: determine if backup tasks should run
+  set_fact:
+    is_backup_node: true
+  when: MONGO_BACKUP_ENABLED and (ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0])) == MONGO_BACKUP_NODE
+  tags:
+    - "backup:mongo"
+
+- name: install logrotate configuration
+  template:
+    src: mongo_logrotate.j2
+    dest: /etc/logrotate.d/hourly/mongo
+  tags:
+    - "backup:mongo"
+    - "install"
+    - "install:app-configuration"
+    - "logrotate"
+
+- name: install prereqs for backup script
+  apt:
+    pkg: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - jq
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+
+- name: install backup script
+  template:
+    src: backup-mongo.sh.j2
+    dest: "{{ mongo_backup_script_path }}"
+    mode: 0700
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: add mongo backup script to cron
+  cron:
+    name: mongo backup job
+    minute: "{{ mongo_backup_cron.minute | default('12') }}"
+    hour: "{{ mongo_backup_cron.hour | default('*/12') }}"
+    day: "{{ mongo_backup_cron.day | default('*') }}"
+    month: "{{ mongo_backup_cron.month | default('*') }}"
+    weekday: "{{ mongo_backup_cron.weekday | default('*') }}"
+    job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1"
+  become: yes
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: format mongo backup volume
+  filesystem:
+    dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}"
+    fstype: ext4
+    force: true
+  ignore_errors: true
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: restart mongo service if we changed our configuration or upgraded mongo
+  service:
+    name: mongod
+    state: restarted
+  when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed
+  tags:
+    - "manage"
+    - "manage:start"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  tags:
+    - "manage"
+    - "manage:start"
+    - "manage:db-replication"
+
+# We only try passwordless superuser creation when
+# we're initializing the replica set and need to use
+# the localhost exemption to create a user who will be
+# able to initialize the replica set.
+# We can only create the users on one machine, the one
+# where we will initialize the replica set. If we
+# create users on multiple hosts, then they will fail
+# to join the replica set.
+- name: create super user
+  mongodb_user:
+    name: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    database: admin
+    roles: root
+  when: initialize_replica_set
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+# Now that the localhost exemption has been used to create the superuser, we need
+# to add the replica set to our configuration. This will never happen if we detected
+# a replica set in the 'determine if there is a replica set already' task.
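+# The user loop further below consumes MONGO_USERS; an additional application
+# user would be appended like this (a sketch; the names and the vaulted
+# variable are hypothetical):
+#
+#   MONGO_USERS:
+#     - user: reporting
+#       password: "{{ REPORTING_MONGO_PASSWORD }}"
+#       database: reports
+#       roles: read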
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+    - "manage:db-replication-configuration"
+
+# During initial replica set configuration, it can take a few seconds to vote
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary
+  mongodb_rs_status:
+    host: "{{ ansible_lo['ipv4']['address'] }}"
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+  register: status
+  until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
+  when: mongo_configure_replica_set
+  retries: 5
+  delay: 2
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+
+- name: create mongodb users in a replica set
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_database: 'admin'
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ MONGO_REPL_SET }}"
+  with_items: "{{ MONGO_USERS }}"
+  run_once: true
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-users"
+    - "manage:db-replication"
+
+- name: ensure mongo starts at boot time
+  service:
+    name: mongod
+    enabled: yes
+  tags:
+    - "manage"
+    - "manage:start"
+
+- name: add serverStatus logging script
+  template:
+    src: "log-mongo-serverStatus.sh.j2"
+    dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+    mode: 0700
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: add serverStatus logging script to cron
+  cron:
+    name: mongostat logging job
+    minute: "*/3"
+    job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
+  become: yes
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
diff --git a/playbooks/roles/mongo_6_0/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_6_0/templates/log-mongo-serverStatus.sh.j2
new file mode 100644
index 00000000000..04649d55ad1
--- /dev/null
+++ b/playbooks/roles/mongo_6_0/templates/log-mongo-serverStatus.sh.j2
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's 
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_6_0/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_6_0/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_6_0/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_6_0/templates/mongod.conf.j2 b/playbooks/roles/mongo_6_0/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_6_0/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_7_0/defaults/main.yml b/playbooks/roles/mongo_7_0/defaults/main.yml new file mode 100644 index 00000000000..a0d504a831f --- /dev/null +++ b/playbooks/roles/mongo_7_0/defaults/main.yml @@ -0,0 +1,111 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "7.0" +MONGO_VERSION_PATCH: "8" +PYMONGO_VERSION: "3.12.3" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ MONGO_VERSION }}" + - "mongodb-org-server={{ MONGO_VERSION }}" + - "mongodb-org-shell={{ MONGO_VERSION }}" + - "mongodb-org-mongos={{ MONGO_VERSION }}" + - "mongodb-org-tools={{ MONGO_VERSION }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be 
overridden
+MONGO_ADMIN_USER: 'admin'
+MONGO_ADMIN_PASSWORD: 'password'
+MONGO_USERS:
+  - user: cs_comments_service
+    password: password
+    database: cs_comments_service
+    roles: readWrite
+  - user: edxapp
+    password: password
+    database: edxapp
+    roles: readWrite
+
+# This default setting is appropriate for a single-machine installation.
+# It will need to be overridden for setups where mongo is on its own server
+# and/or you are configuring mongo replication. If the override value is
+# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a
+# specific IP.
+MONGO_BIND_IP: 127.0.0.1
+
+MONGO_REPL_SET: "rs0"
+MONGO_AUTH: true
+
+MONGO_CLUSTER_KEY: "CHANGEME"
+
+# Cluster member configuration
+# Fed directly into the mongodb_replica_set module
+MONGO_RS_CONFIG:
+  _id: '{{ MONGO_REPL_SET }}'
+  members:
+    - host: '127.0.0.1'
+
+# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
+# 3.2 and 3.4 default to wiredTiger
+MONGO_STORAGE_ENGINE: "wiredTiger"
+
+# List of dictionaries as described in the mount_ebs role's defaults
+# for the volumes.
+# Useful if you want to store your mongo data and/or journal on separate
+# disks from the root volume. By default, they will end up in mongo_data_dir
+# on the root disk.
+MONGO_VOLUMES: []
+
+# WiredTiger takes a number of optional configuration settings
+# which can be defined as a yaml structure in your secure configuration.
+MONGO_STORAGE_ENGINE_OPTIONS: !!null
+
+mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
+mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
+
+# In environments that do not require durability (devstack / Jenkins)
+# you can disable the journal to reduce disk usage
+mongo_enable_journal: true
+
+MONGO_LOG_SERVERSTATUS: true
+
+# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump
+# to back up the database to an EBS volume on the schedule set by mongo_backup_cron.
+# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your
+# setup.
+MONGO_BACKUP_ENABLED: false
+MONGO_BACKUP_NODE: ""  # note: most likely the IP address of the instance on which to perform the backups
+MONGO_BACKUP_EBS_VOLUME_DEVICE: ""
+MONGO_BACKUP_EBS_VOLUME_ID: ""
+MONGO_BACKUP_AUTH_DATABASE: ""
+MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: ""  # passed to `date -d`; should be a relative date like "-30days"
+MONGO_BACKUP_SNITCH_URL: ""  # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup.
+MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup"
+MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup"
+mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh"
+mongo_backup_cron:
+  minute: '12'
+  hour: '*/12'
+  day: '*'
+  month: '*'
+  weekday: '*'
+
+# Internal variable set to true dynamically if backups are enabled and the playbook is running on MONGO_BACKUP_NODE.
+# Do not manually override.
+is_backup_node: false diff --git a/playbooks/roles/mongo_7_0/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_7_0/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_7_0/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_7_0/meta/main.yml b/playbooks/roles/mongo_7_0/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_7_0/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_7_0/tasks/main.yml b/playbooks/roles/mongo_7_0/tasks/main.yml new file mode 100644 index 00000000000..10dd2484cdf --- /dev/null +++ b/playbooks/roles/mongo_7_0/tasks/main.yml @@ -0,0 +1,404 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: present + version: "{{ PYMONGO_VERSION }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. 
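+# Both probes below connect over loopback; ansible_lo['ipv4']['address'] is
+# normally 127.0.0.1, so this is roughly equivalent to checking rs.status()
+# against localhost:27017 (a sketch of the resolved call):
+#
+#   mongodb_rs_status:
+#     host: "127.0.0.1"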
+- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. +- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
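+# Note that the "format mongo backup volume" task further down formats
+# MONGO_BACKUP_EBS_VOLUME_DEVICE with force: true and relies on ignore_errors
+# when the device is absent; a more defensive guard would be (a sketch):
+#
+#   when:
+#     - is_backup_node
+#     - MONGO_BACKUP_EBS_VOLUME_DEVICE | length > 0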
+- name: determine if backup tasks should run
+  set_fact:
+    is_backup_node: true
+  when: MONGO_BACKUP_ENABLED and (ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0])) == MONGO_BACKUP_NODE
+  tags:
+    - "backup:mongo"
+
+- name: install logrotate configuration
+  template:
+    src: mongo_logrotate.j2
+    dest: /etc/logrotate.d/hourly/mongo
+  tags:
+    - "backup:mongo"
+    - "install"
+    - "install:app-configuration"
+    - "logrotate"
+
+- name: install prereqs for backup script
+  apt:
+    pkg: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - jq
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+    - "install:app-requirements"
+    - "mongo_packages"
+
+- name: install backup script
+  template:
+    src: backup-mongo.sh.j2
+    dest: "{{ mongo_backup_script_path }}"
+    mode: 0700
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: add mongo backup script to cron
+  cron:
+    name: mongo backup job
+    minute: "{{ mongo_backup_cron.minute | default('12') }}"
+    hour: "{{ mongo_backup_cron.hour | default('*/12') }}"
+    day: "{{ mongo_backup_cron.day | default('*') }}"
+    month: "{{ mongo_backup_cron.month | default('*') }}"
+    weekday: "{{ mongo_backup_cron.weekday | default('*') }}"
+    job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1"
+  become: yes
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: format mongo backup volume
+  filesystem:
+    dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}"
+    fstype: ext4
+    force: true
+  ignore_errors: true
+  when:
+    - is_backup_node
+  tags:
+    - "backup:mongo"
+    - "install"
+
+- name: restart mongo service if we changed our configuration or upgraded mongo
+  service:
+    name: mongod
+    state: restarted
+  when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed
+  tags:
+    - "manage"
+    - "manage:start"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  tags:
+    - "manage"
+    - "manage:start"
+    - "manage:db-replication"
+
+# We only try passwordless superuser creation when
+# we're initializing the replica set and need to use
+# the localhost exemption to create a user who will be
+# able to initialize the replica set.
+# We can only create the users on one machine, the one
+# where we will initialize the replica set. If we
+# create users on multiple hosts, then they will fail
+# to join the replica set.
+- name: create super user
+  mongodb_user:
+    name: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    database: admin
+    roles: root
+  when: initialize_replica_set
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+# Now that the localhost exemption has been used to create the superuser, we need
+# to add the replica set to our configuration. This will never happen if we detected
+# a replica set in the 'determine if there is a replica set already' task.
+- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
+  set_fact:
+    skip_replica_set: false
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: re-copy configuration template with replica set enabled
+  template:
+    src: mongod.conf.j2
+    dest: /etc/mongod.conf
+    backup: yes
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: restart mongo service
+  service:
+    name: mongod
+    state: restarted
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: wait for mongo server to start
+  wait_for:
+    port: 27017
+    delay: 2
+  when: initialize_replica_set
+  tags:
+    - "manage"
+    - "manage:db-replication"
+
+- name: configure replica set
+  mongodb_replica_set:
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+    rs_config: "{{ MONGO_RS_CONFIG }}"
+  run_once: true
+  register: replset_status
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+    - "manage:db-replication-configuration"
+
+# During initial replica set configuration, it can take a few seconds to vote
+# a primary and for all members to reflect that status. During that window,
+# user creation or other writes can fail. The best wait/check seems to be repeatedly
+# checking the replica set status until we see a PRIMARY in the results.
+- name: Wait for the replica set to update and (if needed) elect a primary
+  mongodb_rs_status:
+    host: "{{ ansible_lo['ipv4']['address'] }}"
+    username: "{{ MONGO_ADMIN_USER }}"
+    password: "{{ MONGO_ADMIN_PASSWORD }}"
+  register: status
+  until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
+  when: mongo_configure_replica_set
+  retries: 5
+  delay: 2
+  run_once: true
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-replication"
+
+- name: create mongodb users in a replica set
+  mongodb_user:
+    database: "{{ item.database }}"
+    login_database: 'admin'
+    login_user: "{{ MONGO_ADMIN_USER }}"
+    login_password: "{{ MONGO_ADMIN_PASSWORD }}"
+    name: "{{ item.user }}"
+    password: "{{ item.password }}"
+    roles: "{{ item.roles }}"
+    state: present
+    replica_set: "{{ MONGO_REPL_SET }}"
+  with_items: "{{ MONGO_USERS }}"
+  run_once: true
+  when: mongo_configure_replica_set
+  tags:
+    - "manage"
+    - "manage:db"
+    - "manage:db-users"
+    - "manage:db-replication"
+
+- name: ensure mongo starts at boot time
+  service:
+    name: mongod
+    enabled: yes
+  tags:
+    - "manage"
+    - "manage:start"
+
+- name: add serverStatus logging script
+  template:
+    src: "log-mongo-serverStatus.sh.j2"
+    dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
+    owner: "{{ mongo_user }}"
+    group: "{{ mongo_user }}"
+    mode: 0700
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: add serverStatus logging script to cron
+  cron:
+    name: mongostat logging job
+    minute: "*/3"
+    job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
+  become: yes
+  when: MONGO_LOG_SERVERSTATUS
+  tags:
+    - "install"
+    - "install:app-configuration"
diff --git a/playbooks/roles/mongo_7_0/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_7_0/templates/log-mongo-serverStatus.sh.j2
new file mode 100644
index 00000000000..04649d55ad1
--- /dev/null
+++ b/playbooks/roles/mongo_7_0/templates/log-mongo-serverStatus.sh.j2
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's 
weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_7_0/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_7_0/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_7_0/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_7_0/templates/mongod.conf.j2 b/playbooks/roles/mongo_7_0/templates/mongod.conf.j2 new file mode 100644 index 00000000000..5b209c5cb13 --- /dev/null +++ b/playbooks/roles/mongo_7_0/templates/mongod.conf.j2 @@ -0,0 +1,39 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. + dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_client/defaults/main.yml b/playbooks/roles/mongo_client/defaults/main.yml new file mode 100644 index 00000000000..71300d4624a --- /dev/null +++ b/playbooks/roles/mongo_client/defaults/main.yml @@ -0,0 +1,9 @@ +--- +MONGO_VERSION_MAJOR_MINOR: "4.2" +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" +MONGODB_REPO_BIONIC: "deb http://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/{{ MONGO_VERSION_MAJOR_MINOR }} multiverse" +MONGO_CLIENT_VERSION: "4.2.14" + +mongo_client_debian_pkgs: + - "mongodb-org-shell={{ MONGO_CLIENT_VERSION }}" + - "mongodb-org-tools={{ MONGO_CLIENT_VERSION }}" diff --git a/playbooks/roles/mongo_client/tasks/main.yml b/playbooks/roles/mongo_client/tasks/main.yml new file mode 100644 index 00000000000..64c71616169 --- /dev/null +++ b/playbooks/roles/mongo_client/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-%7B%7B%20MONGO_VERSION_MAJOR_MINOR%20%7D%7D.asc" + state: present + retries: 3 + register: add_mongo_signing_key + tags: + - install + - install:system-requirements + until: add_mongo_signing_key is succeeded + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - install + - 
install:system-requirements + when: ansible_distribution_release != 'focal' + +# mongo 4.2 does not have an apt source list for Focal, +# so use the Bionic repo source list to install mongo 4.2 +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO_BIONIC }}" + state: present + tags: + - install + - install:system-requirements + when: ansible_distribution_release == 'focal' + +- name: install mongo shell + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + with_items: "{{ mongo_client_debian_pkgs }}" + tags: + - install + - install:system-requirements diff --git a/playbooks/roles/mongo_mms/defaults/main.yml b/playbooks/roles/mongo_mms/defaults/main.yml new file mode 100644 index 00000000000..2acb92fcd2b --- /dev/null +++ b/playbooks/roles/mongo_mms/defaults/main.yml @@ -0,0 +1,13 @@ + +base_url: "https://cloud.mongodb.com/download/agent" +pkg_arch: "amd64" +pkg_format: "deb" +os_version: "ubuntu1604" + +mongodb_agent_dir: "/data" +mongodb_agent_user: "mongodb" +mongodb_agent: + - agent: mongodb-mms-automation-agent-manager + version: "13.7.0.8514-1" + config: "/etc/mongodb-mms/automation-agent.config" + dir: "automation" diff --git a/playbooks/roles/mongo_mms/handlers/main.yml b/playbooks/roles/mongo_mms/handlers/main.yml new file mode 100644 index 00000000000..eecb47dd642 --- /dev/null +++ b/playbooks/roles/mongo_mms/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart mms + service: name=mongodb-mms-monitoring-agent state=restarted diff --git a/playbooks/roles/mongo_mms/tasks/main.yml b/playbooks/roles/mongo_mms/tasks/main.yml new file mode 100644 index 00000000000..bab13647838 --- /dev/null +++ b/playbooks/roles/mongo_mms/tasks/main.yml @@ -0,0 +1,51 @@ +--- +# mongo_mms +# +# Example play: +# +# roles: +# - mongo_mms + +- fail: + msg: "MMSAPIKEY is required" + when: MMSAPIKEY is not defined + +# Install mongoDB agent +- name: download mongodb agent + get_url: + url: "{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ os_version }}.{{ pkg_format }}" + dest: "/tmp/{{ item.agent }}_{{ item.version }}.{{ pkg_format }}" + register: download_mongodb_deb + with_items: "{{ mongodb_agent }}" + +- name: install mongodb agent + apt: + deb: "/tmp/{{ item.agent }}_{{ item.version }}.deb" + when: download_mongodb_deb.changed + with_items: "{{ mongodb_agent }}" + +- name: add group ID to automation-agent.config + lineinfile: + dest: "{{ item.config }}" + regexp: "^mmsGroupId=" + line: "mmsGroupId={{ MMSGROUPID }}" + with_items: "{{ mongodb_agent }}" + +- name: add key to automation-agent.config + lineinfile: + dest: "{{ item.config }}" + regexp: "^mmsApiKey=" + line: "mmsApiKey={{ MMSAPIKEY }}" + with_items: "{{ mongodb_agent }}" + +- name: create data dir if it does not exist + file: + path: "{{ mongodb_agent_dir }}" + owner: "{{ mongodb_agent_user }}" + group: "{{ mongodb_agent_user }}" + state: directory + +- name: start mongodb agent service + service: + name: "mongodb-mms-automation-agent" + state: started diff --git a/playbooks/roles/mongo_newrelic_monitor/defaults/main.yml b/playbooks/roles/mongo_newrelic_monitor/defaults/main.yml new file mode 100644 index 00000000000..f3ec3ac972a --- /dev/null +++ b/playbooks/roles/mongo_newrelic_monitor/defaults/main.yml @@ -0,0 +1,5 @@ +MONGO_NEWRELIC_MONITOR_USER: 'newrelic-monitor' +MONGO_NEWRELIC_MONITOR_PASSWORD: 'SET-ME-PLEASE' +MONGO_NEWRELIC_USER_AUTH_SOURCE: 'admin' +MONGO_NEWRELIC_CLUSTER_NAME: 'mongo-cluster'
+MONGO_NEWRELIC_LABELS: '{}' # eg '{ "env": "prod", "label": "my-label" }' diff --git a/playbooks/roles/mongo_newrelic_monitor/meta/main.yml b/playbooks/roles/mongo_newrelic_monitor/meta/main.yml new file mode 100644 index 00000000000..2edccabacac --- /dev/null +++ b/playbooks/roles/mongo_newrelic_monitor/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - newrelic_infrastructure diff --git a/playbooks/roles/mongo_newrelic_monitor/tasks/main.yml b/playbooks/roles/mongo_newrelic_monitor/tasks/main.yml new file mode 100644 index 00000000000..f472f0673d7 --- /dev/null +++ b/playbooks/roles/mongo_newrelic_monitor/tasks/main.yml @@ -0,0 +1,45 @@ +--- + +- name: Install newrelic mongo integration + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - "nri-mongodb" + +- name: Create newrelic monitoring role + shell: + cmd: 'mongo --authenticationDatabase admin -u "{{ MONGO_ADMIN_USER }}" -p "{{ MONGO_ADMIN_PASSWORD }}" --host "{{ MONGO_REPL_SET }}/{{ EDXAPP_MONGO_HOSTS }}" --eval ''db.createRole({ role: "listCollections", privileges: [{ resource: {db:"",collection:""}, actions: ["listCollections"] }], roles: [] });'' admin' + register: create_newrelic_role_result + ignore_errors: true + +- name: Fail if create newrelic monitoring role actually failed + fail: + msg: create newrelic monitoring role failed + when: 'create_newrelic_role_result.rc != 0 and "already exists" not in create_newrelic_role_result.stdout' + +- name: Create newrelic mongo user + mongodb_user: + login_database: "admin" + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + replica_set: "{{ MONGO_REPL_SET }}" + database: "admin" + name: "{{ MONGO_NEWRELIC_MONITOR_USER }}" + password: "{{ MONGO_NEWRELIC_MONITOR_PASSWORD }}" + roles: + - "clusterMonitor" + - "listCollections" + state: present + +- name: Copy newrelic mongo integration config + template: + src: "mongodb-config.yml.j2" + dest: "/etc/newrelic-infra/integrations.d/mongodb-config.yml" + backup: yes + +- name: Restart the infrastructure agent to apply changes + service: + name: newrelic-infra + state: restarted diff --git a/playbooks/roles/mongo_newrelic_monitor/templates/mongodb-config.yml.j2 b/playbooks/roles/mongo_newrelic_monitor/templates/mongodb-config.yml.j2 new file mode 100644 index 00000000000..788be0c2a1d --- /dev/null +++ b/playbooks/roles/mongo_newrelic_monitor/templates/mongodb-config.yml.j2 @@ -0,0 +1,44 @@ +# {{ ansible_managed }} +# +# Configuration docs: +# https://docs.newrelic.com/docs/integrations/host-integrations/host-integrations-list/mongodb-monitoring-integration#users-privileges +# +# This is designed to be installed on the same instance as a mongo node. + +integration_name: com.newrelic.mongodb + +instances: + - name: all + # Available commands are "all", "metrics", and "inventory" + command: all + arguments: + cluster_name: '{{ MONGO_NEWRELIC_CLUSTER_NAME }}' + # The mongos to connect to + host: localhost + # The port the mongos is running on + port: 27017 + # The username of the user created to monitor the cluster. + # This user should exist on the cluster as a whole as well + # as on each of the individual mongods. + username: '{{ MONGO_NEWRELIC_MONITOR_USER }}' + # The password for the monitoring user + password: '{{ MONGO_NEWRELIC_MONITOR_PASSWORD }}' + # The database on which the monitoring user is stored + auth_source: '{{ MONGO_NEWRELIC_USER_AUTH_SOURCE }}' + # Connect using SSL + ssl: false + # Skip verification of the certificate sent by the host. 
+ # This can make the connection susceptible to man-in-the-middle attacks, + # and should only be used for testing + #ssl_insecure_skip_verify: true + # Path to the CA certs file + #ssl_ca_certs: /sample/path/to/ca_certs + # Client Certificate to present to the server (optional) + #pem_key_file: /sample/file.pem + # Passphrase to decrypt PEMKeyFile file (optional) + #passphrase: secret + # A JSON map of database names to an array of collection names. If empty, + # defaults to all databases and collections. If the list of collections is null, + # collects all collections for the database. + filters: '' + labels: {{ MONGO_NEWRELIC_LABELS }} diff --git a/playbooks/roles/mount_ebs/defaults/main.yml b/playbooks/roles/mount_ebs/defaults/main.yml new file mode 100644 index 00000000000..00e28c98058 --- /dev/null +++ b/playbooks/roles/mount_ebs/defaults/main.yml @@ -0,0 +1,19 @@ +# Expects a list of dicts with these keys +# - device: /dev/xvdk +# mount: /edx/var/mongo +# options: "defaults,noatime" +# fstype: ext4 +# While mount, options and fstype are pretty standard in our app, the device names +# will be highly dependent on how you stand up your instances. +# +# Additionally - order is important if you have child directories. If you want to mount +# /edx/var/mongo and /edx/var/mongo/mongodb/journal, you must specify them in that order, +# otherwise this role will mount /edx/var/mongo over the top of /edx/var/mongo/mongodb/journal +# which is not what you wanted. +volumes: [] + +UNMOUNT_DISKS: false +# WARNING! FORCE_REFORMAT_DISKS will cause your volumes to always be reformatted +# even if all the volume's attributes already match what you've defined in volumes[] +# Enable this flag at your own risk with an abundance of caution +FORCE_REFORMAT_DISKS: false diff --git a/playbooks/roles/mount_ebs/tasks/main.yml b/playbooks/roles/mount_ebs/tasks/main.yml new file mode 100644 index 00000000000..52fe8291ee5 --- /dev/null +++ b/playbooks/roles/mount_ebs/tasks/main.yml @@ -0,0 +1,97 @@ +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role mount_ebs +# +# Overview: +# +# This role ensures that the correct EBS volumes are mounted to the right locations. +# If the volumes are already mounted to the correct place, this role does nothing. + +# Newer AWS EC2 instances sometimes swap the order of the disks, resulting in a very small data volume and a very large +# journal volume. This prevents that by confirming that the disk sizes are correct before proceeding. Rebooting seems to +# fix the ordering. +- name: Check disk size + assert: + that: + - "{{ ansible_devices[item.device.split('/')[-1]]['size'] == item.size }}" + fail_msg: "Actual size {{ ansible_devices[item.device.split('/')[-1]]['size'] }} != Expected size {{ item.size }}.
Rebooting the instance may fix the ordering issue" + with_items: "{{ volumes }}" + +# This task will be skipped if UNMOUNT_DISKS is false, causing the next task +# to error if the disk has the wrong fstype but is already mounted +- name: Unmount disk if fstype is wrong + mount: + name: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'mount': None})).mount }}" + fstype: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype }}" + state: unmounted + when: "UNMOUNT_DISKS and (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype != item.fstype" + with_items: "{{ volumes }}" + +# If there are disks we want to be unmounting, but we can't because UNMOUNT_DISKS is false, +# that is an error condition, since it will cause the format step to fail +- name: Check that we don't want to unmount disks to change fstype when UNMOUNT_DISKS is false + fail: msg="Found disks mounted with the wrong filesystem type, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe." + when: + "not UNMOUNT_DISKS and + volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and + (volumes | selectattr('device', 'equalto', item.device) | first).fstype != item.fstype" + with_items: "{{ ansible_mounts }}" + +# Noop & reports "ok" if fstype is correct +# Errors if fstype is wrong and disk is mounted (hence above task) +- name: Create filesystem + filesystem: + dev: "{{ item.device }}" + fstype: "{{ item.fstype }}" + # Necessary because AWS gives some ephemeral disks the wrong fstype by default + force: "{{ FORCE_REFORMAT_DISKS }}" + with_items: "{{ volumes }}" + +- name: Regather facts to get UUIDs of freshly formatted disks + setup: "" + +# This can fail if one volume is mounted on a child directory as another volume +# and it attempts to unmount the parent first. This is generally fixable by rerunning. +# Order is super dependent here, but we're iterating ansible_mounts (in order to identify +# all current mounts in the system) not volumes, which would be reversible. +# Possibly fixable by saving this list of unmounts off and comparing it to volumes, but this +# task rarely runs, since on server setup, the disks are unmounted, and we won't +# be unmounting disks unless you set UNMOUNT_DISKS to true. +- name: Unmount disks mounted to the wrong place + mount: + name: "{{ item.mount }}" + fstype: "{{ item.fstype }}" + state: unmounted + when: + UNMOUNT_DISKS and + volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and + (volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount + with_items: "{{ ansible_mounts }}" + +# If there are disks we want to be unmounting, but we can't because UNMOUNT_DISKS is false, +# that is an error condition, since it can easily allow us to double mount a disk. +- name: Check that we don't want to unmount disks to change mountpoint when UNMOUNT_DISKS is false + fail: msg="Found disks mounted in the wrong place, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe."
+ when: + not UNMOUNT_DISKS and + volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and + (volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount + with_items: "{{ ansible_mounts }}" + +# Use UUID to prevent issues with AWS EC2 swapping device order +- name: Mount disks + mount: + name: "{{ item.mount }}" + src: "UUID={{ ansible_devices[item.device.split('/')[-1]]['links']['uuids'][0] }}" + state: mounted + fstype: "{{ item.fstype }}" + opts: "{{ item.options }}" + with_items: "{{ volumes }}" diff --git a/playbooks/roles/munin_node/defaults/main.yml b/playbooks/roles/munin_node/defaults/main.yml new file mode 100644 index 00000000000..294825d2c86 --- /dev/null +++ b/playbooks/roles/munin_node/defaults/main.yml @@ -0,0 +1,29 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Defaults for role munin-node +# +MUNIN_MONITOR_CIDR: 127.0.0.1/32 +# +# vars are namespaced with the module name. +# +munin_node_role_name: munin-node + +# +# OS packages +# + +munin_node_debian_pkgs: + - "munin-node" + - "sysstat" + - "libnet-cidr-perl" + +munin_node_redhat_pkgs: [] + diff --git a/playbooks/roles/munin_node/files/munin-edx b/playbooks/roles/munin_node/files/munin-edx new file mode 100644 index 00000000000..b53ae568479 --- /dev/null +++ b/playbooks/roles/munin_node/files/munin-edx @@ -0,0 +1,2 @@ +[iostat] +env.SHOW_NUMBERED 1 \ No newline at end of file diff --git a/playbooks/roles/munin_node/tasks/main.yml b/playbooks/roles/munin_node/tasks/main.yml new file mode 100644 index 00000000000..cef144f577b --- /dev/null +++ b/playbooks/roles/munin_node/tasks/main.yml @@ -0,0 +1,87 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role munin_node +# +# Overview: +# +# Install the munin-node monitoring daemon that is generally useful for system +# monitoring and is specifically used by the MongoDB CloudManager monitoring +# agent to provide system metrics.
+# +# Example play: +# +# ansible-playbook -u my_user -i '192.168.100.10,192.168.100.11,' ./run_role.yml -e "role=munin_node" +# + +- name: Install OS packages + apt: + name: "{{ item }}" + state: present + update_cache: yes + with_items: "{{ munin_node_debian_pkgs }}" + tags: + - install + - install:system-requirements + +# Enable optional plugins for io metrics +- name: Link munin-node iostat + file: + src: "/usr/share/munin/plugins/iostat" + dest: "/etc/munin/plugins/iostat" + state: link + owner: root + group: root + tags: + - install + - install:configuration + +- name: Link munin-node iostat-ios + file: + src: "/usr/share/munin/plugins/iostat_ios" + dest: "/etc/munin/plugins/iostat_ios" + state: link + owner: root + group: root + tags: + - install + - install:configuration + +- name: Copy munin iostat config + copy: + src: "munin-edx" + dest: "/etc/munin/plugin-conf.d/munin-edx" + owner: root + group: root + mode: "0644" + tags: + - install + - install:configuration +# +# Assumes a single allow line +# Requires libnet-cidr-perl included in the pkgs installed +# above +- name: Add source cidr + lineinfile: + dest: /etc/munin/munin-node.conf + regexp: '^cidr_allow' + line: 'cidr_allow {{ MUNIN_MONITOR_CIDR }}' + tags: + - install + - install:configuration + +- name: Restart munin-node + service: + name: munin-node + state: restarted + tags: + - install + - install:configuration \ No newline at end of file diff --git a/playbooks/roles/mysql/defaults/main.yml b/playbooks/roles/mysql/defaults/main.yml new file mode 100644 index 00000000000..de977aa4ecf --- /dev/null +++ b/playbooks/roles/mysql/defaults/main.yml @@ -0,0 +1,38 @@ +--- +remove_experimental_mysql: false + +mysql_debian_pkgs_default: + - python3-mysqldb +mysql_release_specific_debian_pkgs: + xenial: + - python-mysqldb + bionic: + - python-mysqldb + focal: + - python3-mysqldb +mysql_debian_pkgs: "{{ mysql_debian_pkgs_default + mysql_release_specific_debian_pkgs[ansible_distribution_release] }}" + +mysql_server_pkg: "{{ 'mysql-server-5.7' if mysql_server_version_5_7 is defined and (mysql_server_version_5_7 | bool) else 'mysql-server-5.6' }}" +mysql_server_5_7_pkg: "mysql-server=5.7.*" +mysql_client_5_7_pkg: "mysql-client=5.7.*" +mysql_community_server_5_7_pkg: "mysql-server=5.7.*" + +mysql_dir: /etc/mysql + +mysql_socket: /var/run/mysqld/mysqld.sock + +mysql_8_0_install: true + +mysql_server_8_0_pkgs: + - mysql-client-8.0 + - mysql-client-core-8.0 + - mysql-common + - mysql-server-8.0 + - mysql-server-core-8.0 + +DEFAULT_MYSQL_CHARACTER_SET: utf8 +DEFAULT_MYSQL_COLLATION: utf8_general_ci + +MYSQL_APT_KEYSERVER: "keyserver.ubuntu.com" +MYSQL_APT_KEY: "467B942D3A79BD29" +MYSQL_REPO: "deb http://repo.mysql.com/apt/ubuntu/ bionic mysql-5.7" diff --git a/playbooks/roles/mysql/meta/main.yml b/playbooks/roles/mysql/meta/main.yml new file mode 100644 index 00000000000..2083f0e1251 --- /dev/null +++ b/playbooks/roles/mysql/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/playbooks/roles/mysql/tasks/main.yml b/playbooks/roles/mysql/tasks/main.yml new file mode 100644 index 00000000000..e5ab6b896bb --- /dev/null +++ b/playbooks/roles/mysql/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Look for mysql 5.6 + shell: "dpkg -s mysql-server" + ignore_errors: yes + register: mysql_56_installed + changed_when: no + +- name: Important message + debug: + msg: | + "MySQL experimental is already installed; set 'remove_experimental_mysql: true' in defaults/main.yml + if you want to remove it and install the stable version of
MySQL" + when: "'5.6.14' in mysql_56_installed.stdout and not remove_experimental_mysql" + +- pause: + seconds: 10 + when: "'5.6.14' in mysql_56_installed.stdout and not remove_experimental_mysql" + +# remove this, once the new devstack is out +- include: remove_mysql_experimental.yml + when: remove_experimental_mysql + +- include: mysql.yml + when: (mysql_56_installed.rc == 1) or (remove_experimental_mysql) diff --git a/playbooks/roles/mysql/tasks/mysql.yml b/playbooks/roles/mysql/tasks/mysql.yml new file mode 100644 index 00000000000..b1ec751d242 --- /dev/null +++ b/playbooks/roles/mysql/tasks/mysql.yml @@ -0,0 +1,124 @@ +--- +# Here are the few steps that I have performed to get the `debconf` setting information: +# +# - downloaded the mysql-apt-repo from the mysql official site(named: mysql-apt-config_0.6.0-1_all.deb) +# - Find the `debconf` setting information after installing this `deb` package +# +# # debconf-show mysql-apt-config +# * mysql-apt-config/select-server: mysql-5.6 +# mysql-apt-config/unsupported-platform: abort +# mysql-apt-config/repo-codename: precise +# * mysql-apt-config/select-product: Apply +# mysql-apt-config/select-tools: +# mysql-apt-config/repo-distro: ubuntu +# +# - Even to get more information about the `debconf` options of the package: +# +# # debconf-get-selections|grep mysql-apt-config +# mysql-apt-config mysql-apt-config/select-server select mysql-5.6 +# mysql-apt-config mysql-apt-config/unsupported-platform select abort +# mysql-apt-config mysql-apt-config/repo-codename select precise +# mysql-apt-config mysql-apt-config/select-product select Apply +# mysql-apt-config mysql-apt-config/select-tools select +# mysql-apt-config mysql-apt-config/repo-distro select ubuntu +# +# - After the installation, I have checked the `/etc/apt/sources.list.d` directory and found one file `mysql.list` +# with following contents: +# +# deb http://repo.mysql.com/apt/ubuntu/ precise mysql-apt-config +# deb http://repo.mysql.com/apt/ubuntu/ precise mysql-5.6 +# deb-src http://repo.mysql.com/apt/ubuntu/ precise mysql-5.6 +# +# +# Thought that instead of performing all those steps and get the repo, why not directly use this repo +# `deb http://repo.mysql.com/apt/ubuntu/ precise mysql-5.6`, I just picked this line and directly used it and it worked for us. 
+ +- name: Install mysql dependencies + apt: + name: "{{ mysql_debian_pkgs }}" + install_recommends: yes + state: present + +- name: Install mysql + apt: + name: "{{ mysql_server_pkg }}" + install_recommends: yes + state: present + when: ansible_distribution_release != 'focal' + +- name: Set default character sets and collations + template: + src: default_character_sets_and_collations.cnf.j2 + dest: "{{ mysql_dir }}/mysql.conf.d/default_character_sets_and_collations.cnf" + owner: root + group: root + mode: 0644 + when: ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal' + +- name: add the mysql signing key + apt_key: + keyserver: "{{ MYSQL_APT_KEYSERVER }}" + id: "{{ MYSQL_APT_KEY }}" + when: ansible_distribution_release == 'focal' and not mysql_8_0_install + +- name: add the mysql-5.7 repo to the sources list + apt_repository: + repo: "{{ MYSQL_REPO }}" + state: present + when: ansible_distribution_release == 'focal' and not mysql_8_0_install + +# Installing mysql-5.7-client, mysql-5.7-community-server +# and mysql-5.7-server in separate tasks to resolve dependencies +- name: install mysql-5.7-client + apt: + name: "{{ mysql_client_5_7_pkg }}" + state: present + update_cache: yes + when: ansible_distribution_release == 'focal' and not mysql_8_0_install + +- name: install mysql-5.7-community-server + apt: + name: "{{ mysql_community_server_5_7_pkg }}" + state: present + update_cache: yes + when: ansible_distribution_release == 'focal' and not mysql_8_0_install + +- name: install mysql-5.7-server + apt: + name: "{{ mysql_server_5_7_pkg }}" + state: present + update_cache: yes + when: ansible_distribution_release == 'focal' and not mysql_8_0_install + +- name: install mysql-8.0 + apt: + name: "{{ mysql_server_8_0_pkgs }}" + state: present + update_cache: yes + when: ansible_distribution_release == 'focal' and mysql_8_0_install + + +- name: restart mysql + command: service mysql restart + +- name: Ensure Anonymous user(s) does not exist + mysql_user: + name: '' + host: "{{ item }}" + state: absent + login_unix_socket: "{{ mysql_socket }}" + with_items: + - localhost + - "{{ ansible_hostname }}" + +- name: Alter user root to use mysql_native_password + shell: + mysql -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password;set password=password('')" + become: true + when: ansible_distribution_release == 'focal' and not mysql_8_0_install + +- name: Alter user root to use mysql_native_password + shell: + mysql -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password by ''" + become: true + when: ansible_distribution_release == 'focal' and mysql_8_0_install diff --git a/playbooks/roles/mysql/tasks/remove_mysql_experimental.yml b/playbooks/roles/mysql/tasks/remove_mysql_experimental.yml new file mode 100644 index 00000000000..f99c195cec0 --- /dev/null +++ b/playbooks/roles/mysql/tasks/remove_mysql_experimental.yml @@ -0,0 +1,19 @@ +--- +- name: Stop mysql service + service: + name: mysql + state: stopped + +- name: Remove experimental apt repository + apt_repository: + repo: 'deb http://ppa.launchpad.net/ondrej/mysql-experimental/ubuntu precise main' + state: absent + +- name: Remove experimental version of mysql + apt: + name: "{{ item }}" + state: absent + purge: yes + with_items: + - mysql-server-5.6 + - mysql-server \ No newline at end of file diff --git a/playbooks/roles/mysql/templates/default_character_sets_and_collations.cnf.j2 b/playbooks/roles/mysql/templates/default_character_sets_and_collations.cnf.j2 new file mode 100644 
index 00000000000..14f5556f4f7 --- /dev/null +++ b/playbooks/roles/mysql/templates/default_character_sets_and_collations.cnf.j2 @@ -0,0 +1,17 @@ + +# {{ ansible_managed }} + +# This does not change any existing databases or rows, only the defaults for newly created databases + +[client] +default-character-set={{ DEFAULT_MYSQL_CHARACTER_SET }} + +[mysql] +default-character-set={{ DEFAULT_MYSQL_CHARACTER_SET }} + + +[mysqld] +collation-server = {{ DEFAULT_MYSQL_COLLATION }} +init-connect='SET NAMES {{ DEFAULT_MYSQL_CHARACTER_SET }}' +character-set-server = {{ DEFAULT_MYSQL_CHARACTER_SET }} + diff --git a/playbooks/roles/neo4j/defaults/main.yml b/playbooks/roles/neo4j/defaults/main.yml new file mode 100644 index 00000000000..12d7dd74a72 --- /dev/null +++ b/playbooks/roles/neo4j/defaults/main.yml @@ -0,0 +1,53 @@ +--- +# +# Adds the latest stable package of neo4j community edition +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role neo4j +# + +# +# vars are namespaced with the module name. +# +NEO4J_SERVER_NAME: "localhost" +NEO4J_AUTH_ENABLED: "true" + +# When updating this version, please update the corresponding +# neo4j Docker image tag used by the Devstack coursegraph service +# (see github.com/openedx/devstack/tree/master/docker-compose.yml). +# Note that the corresponding docker image tag does not include the +# epoch prefix ('1:') -- it's just 'Major.Minor.Patch'. +NEO4J_VERSION: "1:3.5.28" + +# If upgrading to a Major.Minor series other than 3.5, you'll need +# to change the '3.5' repository component below accordingly. 
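+# For example (hypothetical), a 4.4-series install would instead use: +# neo4j_apt_repository: "deb https://debian.neo4j.com stable 4.4"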
+neo4j_apt_repository: "deb https://debian.neo4j.com stable 3.5" + +neo4j_gpg_key_url: https://debian.neo4j.com/neotechnology.gpg.key +neo4j_defaults_file: "/etc/default/neo4j" +neo4j_server_config_file: "/etc/neo4j/neo4j.conf" +neo4j_bolt_port: 7687 # default in package is 7687 +neo4j_https_port: 7473 # default in package is 7473 +neo4j_http_port: 7474 # default in package is 7474 +neo4j_listen_address: "0.0.0.0" +neo4j_heap_max_size: "6000m" +neo4j_page_cache_size: "6000m" +neo4j_log_dir: "/var/log/neo4j" + +# Properties file settings +neo4j_bolt_settings_key: "dbms.connector.bolt.listen_address" +neo4j_bolt_tls_key: "dbms.connector.bolt.tls_level" +neo4j_https_settings_key: "dbms.connector.https.listen_address" +neo4j_http_settings_key: "dbms.connector.http.listen_address" + +# Deprecated files to delete +deprecated_neo4j_wrapper_config_file: "/etc/neo4j/neo4j-wrapper.conf" +deprecated_neo4j_https_settings_key: "dbms.connector.https.address" +deprecated_neo4j_http_settings_key: "dbms.connector.http.address" diff --git a/playbooks/roles/neo4j/meta/main.yml b/playbooks/roles/neo4j/meta/main.yml new file mode 100644 index 00000000000..e9cd64b1a43 --- /dev/null +++ b/playbooks/roles/neo4j/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - common + - role: oraclejdk + ORACLEJDK_VERSION: "8u131" + oraclejdk_base: "jdk1.8.0_131" + oraclejdk_build: "b11" + oraclejdk_link: "/usr/lib/jvm/java-8-oracle" diff --git a/playbooks/roles/neo4j/tasks/main.yml b/playbooks/roles/neo4j/tasks/main.yml new file mode 100644 index 00000000000..1ccd4ac0ea1 --- /dev/null +++ b/playbooks/roles/neo4j/tasks/main.yml @@ -0,0 +1,205 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role neo4j +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: add neo4j gpg key + apt_key: + url: "{{ neo4j_gpg_key_url }}" + state: present + tags: + - install + - install:system-requirements + retries: 10 + delay: 10 + register: neo4j_gpg_key + until: neo4j_gpg_key is succeeded + +- name: add neo4j apt repository + apt_repository: + repo: "{{ neo4j_apt_repository }}" + state: present + tags: + - install + - install:system-requirements + retries: 10 + delay: 10 + register: neo4j_apt_repository + until: neo4j_apt_repository is succeeded + +- name: remove deprecated config file + file: + state: absent + path: "{{ deprecated_neo4j_wrapper_config_file }}" + tags: + - install + - install:base + +- name: install neo4j + apt: + name: "neo4j={{NEO4J_VERSION}}" + state: present + tags: + - install + - install:base + retries: 10 + delay: 10 + register: neo4j_apt_pkg + until: neo4j_apt_pkg is succeeded + +# For what it's worth: We purposely do not prefix these line-replacement +# regex with ^ or suffix them with $. That's because we cannot be +# confident whether these lines initially (i) exist in file commented-out, +# or (ii) exist in the file with a value already set. So, we purposefully +# leave the regexes without beginning- or end-of-line matches so that +# they can handle both scenario (i) and (ii). +# In the future, it'd be good to get rid of these tasks, and instead +# just include j2-templated configuration files to wholesale replace +# what's on the box. 
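+# A hypothetical illustration with the auth setting: whether neo4j.conf starts with +# #dbms.security.auth_enabled=false (shipped commented out) +# or +# dbms.security.auth_enabled=false (already set to some value) +# the unanchored regexp "dbms.security.auth_enabled=" matches the line either way, +# and lineinfile then replaces the whole matching line with the desired value.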
+ +- name: enable or disable authentication + lineinfile: + dest: "{{ neo4j_server_config_file }}" + regexp: "dbms.security.auth_enabled=" + line: "dbms.security.auth_enabled={{ NEO4J_AUTH_ENABLED }}" + tags: + - install + - install:configuration + +- name: set neo4j page cache size + lineinfile: + dest: "{{ neo4j_server_config_file }}" + regexp: "dbms.memory.pagecache.size=" + line: "dbms.memory.pagecache.size={{ neo4j_page_cache_size }}" + tags: + - install + - install:configuration + +- name: set neo4j heap size + lineinfile: + dest: "{{ neo4j_server_config_file }}" + regexp: "{{ item }}=" + line: "{{ item }}={{ neo4j_heap_max_size }}" + with_items: + - "dbms.memory.heap.max_size" + - "dbms.memory.heap.initial_size" + tags: + - install + - install:configuration + +- name: allow format migration (when updating neo4j versions) + lineinfile: + dest: "{{ neo4j_server_config_file }}" + regexp: "dbms.allow_upgrade=" + line: "dbms.allow_upgrade=true" + tags: + - install + - install:configuration + +- name: set to listen on specific port for bolt + lineinfile: + create: yes + dest: "{{ neo4j_server_config_file }}" + regexp: "{{ neo4j_bolt_settings_key }}=" + line: "{{ neo4j_bolt_settings_key }}={{ neo4j_listen_address }}:{{ neo4j_bolt_port }}" + tags: + - install + - install:configuration + +- name: allow both encrypted and unencrypted bolt connections + lineinfile: + create: yes + dest: "{{ neo4j_server_config_file }}" + regexp: "{{ neo4j_bolt_tls_key }}=" + line: "{{ neo4j_bolt_tls_key }}=OPTIONAL" + tags: + - install + - install:configuration + +- name: set to listen on specific port for https + lineinfile: + create: yes + dest: "{{ neo4j_server_config_file }}" + regexp: "{{ neo4j_https_settings_key }}=" + line: "{{ neo4j_https_settings_key }}={{ neo4j_listen_address }}:{{ neo4j_https_port }}" + tags: + - install + - install:configuration + +- name: set to listen on specific port for http + lineinfile: + create: yes + dest: "{{ neo4j_server_config_file }}" + regexp: "{{ neo4j_http_settings_key }}=" + line: "{{ neo4j_http_settings_key }}={{ neo4j_listen_address }}:{{ neo4j_http_port }}" + tags: + - install + - install:configuration + +- name: remove deprecated listen address lines + lineinfile: + state: absent + dest: "{{ neo4j_server_config_file }}" + regexp: "{{ item }}" + with_items: + - "{{ deprecated_neo4j_https_settings_key }}" + - "{{ deprecated_neo4j_http_settings_key }}" + tags: + - install + - install:configuration + +- name: Create neo4j logging dir + file: + path: "{{ neo4j_log_dir }}" + state: directory + owner: neo4j + mode: "0755" + tags: + - install + - install:base + +- name: Create neo4j default file + file: + path: "{{ neo4j_defaults_file }}" + state: touch + owner: neo4j + mode: "0755" + tags: + - install + - install:base + +- name: set max open files to 40000 + lineinfile: + dest: "{{ neo4j_defaults_file }}" + regexp: "#NEO4J_ULIMIT_NOFILE=40000" + line: "NEO4J_ULIMIT_NOFILE=40000" + tags: + - install + - install:base + +- name: restart neo4j + service: + name: neo4j + state: restarted + enabled: yes + tags: + - manage + - manage:start diff --git a/playbooks/roles/neo4j/templates/edx/app/nginx/sites-available/coursegraph.j2 b/playbooks/roles/neo4j/templates/edx/app/nginx/sites-available/coursegraph.j2 new file mode 100644 index 00000000000..70ead75ebaf --- /dev/null +++ b/playbooks/roles/neo4j/templates/edx/app/nginx/sites-available/coursegraph.j2 @@ -0,0 +1,56 @@ +# +# {{ ansible_managed }} +# + + +{% if nginx_default_sites is defined and "neo4j" in nginx_default_sites 
%} + {% set default_site = "default" %} +{% else %} + {% set default_site = "" %} +{% endif %} + +server { + server_name {{ NEO4J_SERVER_NAME }}; + + {% if NGINX_ENABLE_SSL %} + + listen 443 ssl; + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + {% else %} + listen 80 {{ default_site }}; + {% endif %} + + + location / { + try_files $uri @proxy_to_app; + } + + location @proxy_to_app { + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://127.0.0.1:{{ neo4j_http_port }}; + } + + {% if NGINX_ENABLE_SSL %} + + # Forward to HTTPS if we're an HTTP request... + if ($http_x_forwarded_proto = "http") { + set $do_redirect "true"; + } + + # Run our actual redirect... + if ($do_redirect = "true") { + rewrite ^ https://$host$request_uri? permanent; + } + + {% endif %} + +} diff --git a/playbooks/roles/newrelic/defaults/main.yml b/playbooks/roles/newrelic/defaults/main.yml deleted file mode 100644 index e1fe528fb27..00000000000 --- a/playbooks/roles/newrelic/defaults/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -## -# Defaults for role newrelic -# - -# -# vars are namespace with the module name. -# -newrelic_role_name: newrelic - -NEWRELIC_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free' -NEWRELIC_KEY_ID: '548C16BF' -NEWRELIC_KEY_URL: 'https://download.newrelic.com/%7B%7B%20NEWRELIC_KEY_ID%20%7D%7D.gpg' -NEWRELIC_LICENSE_KEY: 'NEW-RELIC-KEY' - -# -# OS packages -# - -newrelic_debian_pkgs: - - newrelic-sysmond - -newrelic_redhat_pkgs: [] diff --git a/playbooks/roles/newrelic/tasks/main.yml b/playbooks/roles/newrelic/tasks/main.yml deleted file mode 100644 index c0dd5b591a4..00000000000 --- a/playbooks/roles/newrelic/tasks/main.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# Tasks for role newrelic -# -# Overview: -# -# Installs and configures the newrelic system monitoring agent. The server -# will be identified in Newrelic by hostname and this cannot be changed -# in configuration. Also configuratin does not allow hosts to be -# associated with an application or tagged. -# -# Example play: -# -# - name: Install Newrelic system agent -# hosts: all -# sudo: True -# gather_facts: True -# roles: -# - newrelic - -- name: add apt key - apt_key: > - id="{{ NEWRELIC_KEY_ID }}" url="{{ NEWRELIC_KEY_URL }}" - state=present - -- name: install apt repository - apt_repository: repo="{{ NEWRELIC_REPO }}" update_cache=yes - -- name: install newrelic agent - apt: pkg="newrelic-sysmond" - -- name: Install newrelic related system packages. 
- apt: pkg={{ item }} install_recommends=yes state=present - with_items: newrelic_debian_pkgs - -- name: configure the agent with the license key - shell: > - nrsysmond-config --set license_key="{{ NEWRELIC_LICENSE_KEY }}" - ssl=true - -- name: ensure started and enabled - service: name=newrelic-sysmond state=restarted enabled=yes \ No newline at end of file diff --git a/playbooks/roles/newrelic_infrastructure/defaults/main.yml b/playbooks/roles/newrelic_infrastructure/defaults/main.yml new file mode 100644 index 00000000000..4833da32155 --- /dev/null +++ b/playbooks/roles/newrelic_infrastructure/defaults/main.yml @@ -0,0 +1,39 @@ +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role newrelic_infrastructure +# + +# +# vars are namespace with the module name. +# +--- +NEWRELIC_INFRASTRUCTURE_LICENSE_KEY: "SPECIFY_KEY_HERE" +NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO: 'deb https://download.newrelic.com/infrastructure_agent/linux/apt {{ ansible_distribution_release }} main' +NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO_XENIAL: 'deb https://download.newrelic.com/infrastructure_agent/linux/apt xenial main' +NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO_BIONIC: 'deb https://download.newrelic.com/infrastructure_agent/linux/apt bionic main' +NEWRELIC_INFRASTRUCTURE_KEY_URL: 'https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg' + +# This data structure will be written into /etc/newrelic-infra/logging.d/logs.yml +NEWRELIC_LOGS: !!null + +# Any extra config you want to specify +# https://docs.newrelic.com/docs/infrastructure/new-relic-infrastructure/configuration/infrastructure-config-file-template-newrelic-infrayml +NEWRELIC_INFRASTRUCTURE_EXTRA_CONFIG: '' +NEWRELIC_INFRASTRUCTURE_AMAZON_REPO: 'https://download.newrelic.com/infrastructure_agent/linux/yum/el/6/x86_64' +# +# OS packages +# + +newrelic_infrastructure_debian_pkgs: + - newrelic-infra + +newrelic_infrastructure_redhat_pkgs: + - newrelic-infra + diff --git a/playbooks/roles/newrelic_infrastructure/files/edx/bin/write_nr_display_name_config.sh b/playbooks/roles/newrelic_infrastructure/files/edx/bin/write_nr_display_name_config.sh new file mode 100644 index 00000000000..9488a6ed118 --- /dev/null +++ b/playbooks/roles/newrelic_infrastructure/files/edx/bin/write_nr_display_name_config.sh @@ -0,0 +1,18 @@ +#! 
/usr/bin/env bash + +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + DISPLAY_NAME="$HOSTNAME-$INSTANCEID" + if [[ -f /etc/newrelic/nrsysmond.cfg ]]; then + sudo sed -i 's/^hostname=.*//g' /etc/newrelic/nrsysmond.cfg + echo "hostname=\"$DISPLAY_NAME\"" | sudo tee -a /etc/newrelic/nrsysmond.cfg + sudo service newrelic-sysmond restart + fi + if [[ -f /etc/newrelic-infra.yml ]]; then + sudo sed -i 's/^display_name: .*//g' /etc/newrelic-infra.yml + echo "display_name: \"$DISPLAY_NAME\"" | sudo tee -a /etc/newrelic-infra.yml + sudo service newrelic-infra restart + fi +fi + diff --git a/playbooks/roles/newrelic_infrastructure/tasks/main.yml b/playbooks/roles/newrelic_infrastructure/tasks/main.yml new file mode 100644 index 00000000000..49b10ba44c7 --- /dev/null +++ b/playbooks/roles/newrelic_infrastructure/tasks/main.yml @@ -0,0 +1,174 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# newrelic_infrastructure +# +# Overview: +# +# Installs the New Relic Infrastructure service https://newrelic.com/infrastructure +## +# Dependencies: +# +# Example play: +# roles: +# - common +# - newrelic_infrastructure +# +# +- name: install license key + template: + src: etc/newrelic-infra.j2 + dest: /etc/newrelic-infra.yml + mode: 0600 + register: license_key_file + tags: + - install + - install:configuration + +- name: Add apt key for New Relic Infrastructure + apt_key: + url: "{{ NEWRELIC_INFRASTRUCTURE_KEY_URL }}" + state: present + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Ubuntu' + retries: 10 + delay: 10 + register: nr_apt_key + until: nr_apt_key is succeeded + +# For focal, use the bionic repo for now. +- name: Install apt repository for New Relic Infrastructure if neither bionic nor focal + apt_repository: + repo: "{{ NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO }}" + state: present + update_cache: yes + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Ubuntu' and (ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal') + retries: 10 + delay: 10 + register: nr_apt_repo + until: nr_apt_repo is succeeded + +- name: Install apt repository for New Relic Infrastructure if bionic + apt_repository: + repo: "{{ NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO_BIONIC }}" + state: present + update_cache: yes + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'bionic' + retries: 10 + delay: 10 + register: nr_apt_repo_bionic + until: nr_apt_repo_bionic is succeeded + +# For focal, use the bionic repo for now. 
+- name: Install apt repository for New Relic Infrastructure if focal + apt_repository: + repo: "{{ NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO_BIONIC }}" + state: present + update_cache: yes + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'focal' + retries: 10 + delay: 10 + register: nr_apt_repo_focal + until: nr_apt_repo_focal is succeeded + +- name: Install newrelic related system packages for Ubuntu + apt: + name: "{{ newrelic_infrastructure_debian_pkgs }}" + install_recommends: yes + state: latest + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Ubuntu' + retries: 10 + delay: 10 + register: nr_apt_pkg + until: nr_apt_pkg is succeeded + +- name: Configure the New Relic Servers yum repository + yum_repository: + name: "newrelic-infra" + baseurl: "{{ NEWRELIC_INFRASTRUCTURE_AMAZON_REPO }}" + gpgkey: "{{ NEWRELIC_INFRASTRUCTURE_KEY_URL }}" + gpgcheck: "yes" + state: present + description: New Relic Infrastructure + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Amazon' + retries: 10 + delay: 10 + register: nr_yum_repo + until: nr_yum_repo is succeeded + +- name: Install newrelic related system packages for Amazon + yum: + name: "{{ newrelic_infrastructure_redhat_pkgs }}" + enablerepo: "newrelic-infra" + state: latest + update_cache: yes + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Amazon' + retries: 10 + delay: 10 + register: nr_yum_pkg + until: nr_yum_pkg is succeeded + +- name: Install newrelic display name script + copy: + src: "edx/bin/write_nr_display_name_config.sh" + dest: "/edx/bin/write_nr_display_name_config.sh" + owner: root + group: root + mode: u=rwx,g=r,o=r + +- name: configure logging + template: + src: etc/newrelic-infra/logging.d/logs.yml.j2 + dest: /etc/newrelic-infra/logging.d/logs.yml + mode: 0600 + tags: + - install + - install:configuration + when: NEWRELIC_LOGS + + +# In Ubuntu>16.04, /etc/rc.local doesn't exist by default. Since this script isn't +# used by the GoCD instance using Ubuntu 18.04, skip this task when running on Bionic. 
+# See OPS-3341 and http://manpages.ubuntu.com/manpages/bionic/man8/systemd-rc-local-generator.8.html +- name: Run newrelic display name script on boot + lineinfile: + dest: "/etc/rc.local" + line: "/edx/bin/write_nr_display_name_config.sh" + insertbefore: "exit 0" + mode: "u+x,g+x" + when: ansible_distribution_release != 'bionic' and ansible_distribution_release != 'focal' + +- name: Restart the infrastructure agent if the license key changes + service: + name: newrelic-infra + state: restarted + when: license_key_file.changed + tags: + - install + - install:configuration diff --git a/playbooks/roles/newrelic_infrastructure/templates/etc/newrelic-infra.j2 b/playbooks/roles/newrelic_infrastructure/templates/etc/newrelic-infra.j2 new file mode 100644 index 00000000000..dc9996dff8c --- /dev/null +++ b/playbooks/roles/newrelic_infrastructure/templates/etc/newrelic-infra.j2 @@ -0,0 +1,2 @@ +license_key: {{ NEWRELIC_INFRASTRUCTURE_LICENSE_KEY }} +{{ NEWRELIC_INFRASTRUCTURE_EXTRA_CONFIG }} diff --git a/playbooks/roles/newrelic_infrastructure/templates/etc/newrelic-infra/logging.d/logs.yml.j2 b/playbooks/roles/newrelic_infrastructure/templates/etc/newrelic-infra/logging.d/logs.yml.j2 new file mode 100644 index 00000000000..b52a6fabfa3 --- /dev/null +++ b/playbooks/roles/newrelic_infrastructure/templates/etc/newrelic-infra/logging.d/logs.yml.j2 @@ -0,0 +1 @@ +{{ NEWRELIC_LOGS | to_nice_yaml }} diff --git a/playbooks/roles/nginx/README.md b/playbooks/roles/nginx/README.md deleted file mode 100644 index 736860e7036..00000000000 --- a/playbooks/roles/nginx/README.md +++ /dev/null @@ -1 +0,0 @@ -* main.yml: installs nginx and will enable the basic nginx configuration for version introspection diff --git a/playbooks/roles/nginx/README.rst b/playbooks/roles/nginx/README.rst new file mode 100644 index 00000000000..224e9283691 --- /dev/null +++ b/playbooks/roles/nginx/README.rst @@ -0,0 +1,2 @@ +- main.yml: installs nginx and will enable the basic nginx configuration for + version introspection diff --git a/playbooks/roles/nginx/defaults/main.yml b/playbooks/roles/nginx/defaults/main.yml index 5d53ac28ba5..20aa8cc4ad7 100644 --- a/playbooks/roles/nginx/defaults/main.yml +++ b/playbooks/roles/nginx/defaults/main.yml @@ -1,9 +1,52 @@ # Variables for nginx role --- -# Set global htaccess for nginx -NGINX_HTPASSWD_USER: !!null -NGINX_HTPASSWD_PASS: !!null +# These are parameters to the role +# and should be overridden +nginx_sites: [] +# If you want to install multiple sites with nginx_site but enable +# them yourself (if you're using a single build for multiple deploys) +# you can skip having them link into sites-enabled and do it during boot.
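+# For example (hypothetical), a single image built with several sites available +# could enable the right one from its boot scripts: +# ln -s /edx/app/nginx/sites-available/lms /etc/nginx/sites-enabled/lms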
+nginx_skip_enable_sites: False +nginx_redirects: {} +nginx_extra_sites: [] +nginx_extra_configs: [] +NGINX_CMS_CLIENT_MAX_BODY_SIZE: 100M +NGINX_LMS_CLIENT_MAX_BODY_SIZE: 20M +NGINX_FORUM_CLIENT_MAX_BODY_SIZE: 1M +NGINX_EDXAPP_EXTRA_SITES: [] +NGINX_EDXAPP_EXTRA_CONFIGS: [] + +# Override these vars to alter the memory allocated to map_hash +NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: False +NGINX_MAP_HASH_MAX_SIZE: 2048 +NGINX_MAP_HASH_BUCKET_SIZE: 64 + +# Override these vars to alter the memory allocated to server_names_hash +NGINX_OVERRIDE_DEFAULT_SERVER_NAMES_HASH_SIZE: False +NGINX_SERVER_NAMES_HASH_BUCKET_SIZE: 64 + +# Override these vars for adding user to nginx.htpasswd +NGINX_USERS: + - name: "{{ COMMON_HTPASSWD_USER }}" + password: "{{ COMMON_HTPASSWD_PASS }}" + state: present + +# Override these vars for adding user to nginx.htpasswd +# for prospectus preview basic auth +PROSPECTUS_PREVIEW_HTPASSWD_USER: "{{ COMMON_HTPASSWD_USER }}" +PROSPECTUS_PREVIEW_HTPASSWD_PASS: "{{ COMMON_HTPASSWD_PASS }}" +PROSPECTUS_PREVIEW_NGINX_USERS: + - name: "{{ PROSPECTUS_PREVIEW_HTPASSWD_USER }}" + password: "{{ PROSPECTUS_PREVIEW_HTPASSWD_PASS }}" + state: present + +NGINX_ENABLE_IPV6: True + NGINX_ENABLE_SSL: False +NGINX_REDIRECT_TO_HTTPS: False +# Disable handling IP disclosure for private IP addresses. This is needed by ELB to run the health checks while using `NGINX_ENABLE_SSL`. +NGINX_ALLOW_PRIVATE_IP_ACCESS: False +NGINX_HSTS_MAX_AGE: 31536000 # Set these to real paths on your # filesystem, otherwise nginx will # use a self-signed snake-oil cert @@ -13,42 +56,184 @@ NGINX_ENABLE_SSL: False # # cat www.example.com.crt bundle.crt > www.example.com.chained.crt +# This variable is only checked if NGINX_REDIRECT_TO_HTTPS is true +# It should be set to one of !!null, "scheme" or "forward_for_proto" +NGINX_HTTPS_REDIRECT_STRATEGY: "scheme" + NGINX_SSL_CERTIFICATE: 'ssl-cert-snakeoil.pem' NGINX_SSL_KEY: 'ssl-cert-snakeoil.key' +NGINX_SSL_CIPHERS: "'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA'" +NGINX_SSL_PROTOCOLS: "TLSv1.1 TLSv1.2" +NGINX_DH_PARAMS_PATH: "/etc/ssl/private/dhparams.pem" +NGINX_DH_KEYSIZE: 2048 + +NGINX_ENABLE_REQUEST_TRACKING_ID: False +# This can be one of 'p_combined' or 'ssl_combined' by default. If you +# wish to specify your own format then define it in a configuration file +# located under `nginx_conf_dir` and then use the format name specified +# in your configuration file. +NGINX_LOG_FORMAT_NAME: 'p_combined' +# When set to False, nginx will pass X-Forwarded-For, X-Forwarded-Port, +# and X-Forwarded-Proto headers through to the backend unmodified. +# This is desired when nginx is deployed behind another load balancer +# which takes care of properly setting the X-Forwarded-* headers. +# When there is no other load balancer in front of nginx, set this +# variable to True to force nginx to set the values of the X-Forwarded-* +# headers to reflect the properties of the incoming request. +NGINX_SET_X_FORWARDED_HEADERS: False + +# Increasing these values allows studio to process more complex operations. +# Default timeouts limit CMS connections to 60 seconds. 
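+# For example (hypothetical override for long-running Studio operations): +# NGINX_CMS_PROXY_READ_TIMEOUT: 300s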
+ +NGINX_CMS_PROXY_CONNECT_TIMEOUT: !!null +NGINX_CMS_PROXY_SEND_TIMEOUT: !!null +NGINX_CMS_PROXY_READ_TIMEOUT: 60s +NGINX_LMS_PROXY_READ_TIMEOUT: 60s +NGINX_FORUM_PROXY_READ_TIMEOUT: 60s + +NGINX_SERVER_ERROR_IMG: 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/11/Pendleton_Sinking_Ship.jpg/640px-Pendleton_Sinking_Ship.jpg' +NGINX_SERVER_ERROR_IMG_ALT: '' +NGINX_SERVER_ERROR_LANG: 'en' +NGINX_SERVER_ERROR_STYLE_H1: 'font-family: "Helvetica Neue",Helvetica,Roboto,Arial,sans-serif; margin-bottom: .3em; font-size: 2.0em; line-height: 1.25em; text-rendering: optimizeLegibility; font-weight: bold; color: #000000;' +NGINX_SERVER_ERROR_STYLE_P_H2: 'font-family: "Helvetica Neue",Helvetica,Roboto,Arial,sans-serif; margin-bottom: .3em; line-height: 1.25em; text-rendering: optimizeLegibility; font-weight: bold; font-size: 1.8em; color: #5b5e63;' +NGINX_SERVER_ERROR_STYLE_P: 'font-family: Georgia,Cambria,"Times New Roman",Times,serif; margin: auto; margin-bottom: 1em; font-weight: 200; line-height: 1.4em; font-size: 1.1em; max-width: 80%;' +NGINX_SERVER_ERROR_STYLE_DIV: 'margin: auto; width: 800px; text-align: center; padding:20px 0px 0px 0px;' +NGINX_SERVER_HTML_FILES_TEMPLATE: "edx/var/nginx/server-static/server-template.j2" +NGINX_SERVER_HTML_FILES: + - file: rate-limit.html + lang: "{{ NGINX_SERVER_ERROR_LANG }}" + title: 'Rate limit exceeded' + msg: 'We are aware of the error and are working to find a resolution.' + img: "{{ NGINX_SERVER_ERROR_IMG }}" + img_alt: "{{ NGINX_SERVER_ERROR_IMG_ALT }}" + heading: 'Uh oh, we are having some server issues..' + style_h1: "{{ NGINX_SERVER_ERROR_STYLE_H1 }}" + style_p_h2: "{{ NGINX_SERVER_ERROR_STYLE_P_H2 }}" + style_p: "{{ NGINX_SERVER_ERROR_STYLE_P }}" + style_div: "{{ NGINX_SERVER_ERROR_STYLE_DIV }}" + - file: server-error.html + lang: "{{ NGINX_SERVER_ERROR_LANG }}" + title: 'Server error' + msg: 'We are aware of the error and are working to find a resolution.' + img: "{{ NGINX_SERVER_ERROR_IMG }}" + img_alt: "{{ NGINX_SERVER_ERROR_IMG_ALT }}" + heading: 'Uh oh, we are having some server issues..' 
+ style_h1: "{{ NGINX_SERVER_ERROR_STYLE_H1 }}" + style_p_h2: "{{ NGINX_SERVER_ERROR_STYLE_P_H2 }}" + style_p: "{{ NGINX_SERVER_ERROR_STYLE_P }}" + style_div: "{{ NGINX_SERVER_ERROR_STYLE_DIV }}" + + +NGINX_SERVER_STATIC_FILES: [] + +NGINX_APT_REPO: deb http://nginx.org/packages/ubuntu/ {{ ansible_distribution_release }} nginx + nginx_app_dir: "{{ COMMON_APP_DIR }}/nginx" nginx_data_dir: "{{ COMMON_DATA_DIR }}/nginx" +nginx_server_static_dir: "{{ nginx_data_dir }}/server-static" +nginx_server_cache_dir: "{{ nginx_data_dir }}/cache" nginx_conf_dir: "{{ nginx_app_dir }}/conf.d" nginx_log_dir: "{{ COMMON_LOG_DIR }}/nginx" nginx_sites_available_dir: "{{ nginx_app_dir }}/sites-available" -nginx_sites_enabled_dir: "{{ nginx_app_dir }}/sites-enabled" +nginx_sites_enabled_dir: "/etc/nginx/sites-enabled" nginx_user: root nginx_htpasswd_file: "{{ nginx_app_dir }}/nginx.htpasswd" nginx_default_sites: [] -nginx_debian_pkgs: - - nginx - - python-passlib -nginx_xserver_gunicorn_hosts: - - 127.0.0.1 +nginx_release_specific_debian_pkgs: + xenial: + - python-passlib + - python3-passlib + bionic: + - python-passlib + focal: + - python3-passlib + +nginx_debian_pkgs: "{{ nginx_release_specific_debian_pkgs[ansible_distribution_release] }}" + +NGINX_EDXAPP_ENABLE_S3_MAINTENANCE: False +nginx_default_error_page: "/server/server-error.html" +NGINX_EDXAPP_ERROR_PAGES: + "500": "{{ nginx_default_error_page }}" + "502": "{{ nginx_default_error_page }}" + "504": "{{ nginx_default_error_page }}" + +NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS: false + +NGINX_EDXAPP_FAVICON_PATH: "/static/{{ NGINX_EDXAPP_DEFAULT_SITE_THEME }}images/favicon.ico" + +CMS_HOSTNAME: '~^((stage|prod)-)?studio.*' + +nginx_template_dir: "edx/app/nginx/sites-available" + nginx_xqueue_gunicorn_hosts: - 127.0.0.1 -nginx_ora_gunicorn_hosts: - - 127.0.0.1 nginx_lms_gunicorn_hosts: - 127.0.0.1 nginx_lms_preview_gunicorn_hosts: - 127.0.0.1 nginx_cms_gunicorn_hosts: - 127.0.0.1 +nginx_analytics_api_gunicorn_hosts: + - 127.0.0.1 +nginx_insights_gunicorn_hosts: + - 127.0.0.1 +nginx_gitreload_gunicorn_hosts: + - 127.0.0.1 +nginx_edx_notes_api_gunicorn_hosts: + - 127.0.0.1 +nginx_ecommerce_gunicorn_hosts: + - 127.0.0.1 +nginx_credentails_gunicorn_hosts: + - 127.0.0.1 + +NGINX_ROBOT_RULES: [ ] +NGINX_EDXAPP_EMBARGO_CIDRS: [] +NGINX_P3P_MESSAGE: 'CP="Open edX does not have a P3P policy."' + +COMMON_ENABLE_BASIC_AUTH: False + +REDIRECT_NGINX_PORT: "{{ EDXAPP_LMS_NGINX_PORT }}" +REDIRECT_SSL_NGINX_PORT: "{{ EDXAPP_LMS_SSL_NGINX_PORT }}" + +ECOMMERCE_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +EDXAPP_CMS_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +EDXAPP_LMS_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +EDXAPP_LMS_PREVIEW_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +KIBANA_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +XQUEUE_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" + +NGINX_CREATE_HTPASSWD_FILE: > + {{ + ECOMMERCE_ENABLE_BASIC_AUTH|bool or + EDXAPP_CMS_ENABLE_BASIC_AUTH|bool or + EDXAPP_LMS_ENABLE_BASIC_AUTH|bool or + EDXAPP_LMS_PREVIEW_ENABLE_BASIC_AUTH|bool or + KIBANA_ENABLE_BASIC_AUTH|bool or + XQUEUE_ENABLE_BASIC_AUTH|bool + }} + +# Extra settings to add to site configuration for Studio +NGINX_EDXAPP_CMS_APP_EXTRA: "" +# Extra settings to add to site configuration for LMS +NGINX_EDXAPP_LMS_APP_EXTRA: "" + +# If comprehensive theme enabled, write down the name of +# the theme as in EDXAPP_DEFAULT_SITE_THEME ending with / +# to allow to override 
+# List of subnets or IP addresses allowed to access admin endpoints
+NGINX_ADMIN_ACCESS_CIDRS: []
+
+# Trusted network subnets or IP addresses; forwarded client addresses
+# from these sources are substituted in as the real client address
+NGINX_TRUSTED_IP_CIDRS: "0.0.0.0/0"
 
-nginx_cfg:
-  # - link - turn on
-  # - absent - turn off
-  sites_enabled:
-    basic_auth: link
-    edx_release: link
-  # path to version files for the basic
-  # nginx configuration
-  version_html: "{{ nginx_app_dir }}/versions.html"
-  version_json: "{{ nginx_app_dir }}/versions.json"
+EDXAPP_SET_PROXY_BUFFER_SIZE: False
+EDXAPP_PROXY_BUFFER_SIZE: 128k
+EDXAPP_PROXY_BUFFERS_SIZE: 256k
+EDXAPP_PROXY_BUFFERS_NUMBER: 4
+EDXAPP_PROXY_BUSY_BUFFERS_SIZE: 256k
diff --git a/playbooks/roles/nginx/handlers/main.yml b/playbooks/roles/nginx/handlers/main.yml
index 81b7f6b7092..9b55c2a3933 100644
--- a/playbooks/roles/nginx/handlers/main.yml
+++ b/playbooks/roles/nginx/handlers/main.yml
@@ -1,6 +1,10 @@
 ---
 - name: restart nginx
-  service: name=nginx state=restarted
+  service:
+    name: nginx
+    state: restarted
 
 - name: reload nginx
-  service: name=nginx state=reloaded
+  service:
+    name: nginx
+    state: reloaded
diff --git a/playbooks/roles/nginx/tasks/main.yml b/playbooks/roles/nginx/tasks/main.yml
index 70facf9e299..21cb9b4f292 100644
--- a/playbooks/roles/nginx/tasks/main.yml
+++ b/playbooks/roles/nginx/tasks/main.yml
@@ -2,133 +2,454 @@
 # - common/tasks/main.yml
 ---
-- name: create nginx app dirs
-  file: >
-    path="{{ item }}"
-    state=directory
-    owner="{{ nginx_user }}"
-    group="{{ common_web_group }}"
+- name: Create Diffie-Hellman parameters to prevent weak key exchange
+  command: openssl dhparam -out "{{ NGINX_DH_PARAMS_PATH | basename }}" {{ NGINX_DH_KEYSIZE }}
+  args:
+    chdir: "{{ NGINX_DH_PARAMS_PATH | dirname }}"
+    creates: "{{ NGINX_DH_PARAMS_PATH }}"
+  tags:
+    - install
+    - install:configuration
+
+- name: Restrict permissions of DH parameters file
+  file:
+    path: "{{ NGINX_DH_PARAMS_PATH }}"
+    owner: "root"
+    group: "root"
+    mode: 0600
+  tags:
+    - install
+    - install:configuration
+
+- name: Create nginx app and data dirs
+  file:
+    path: "{{ item.path }}"
+    state: directory
+    owner: "{{ item.owner }}"
+    group: "{{ item.group }}"
   with_items:
-    - "{{ nginx_app_dir }}"
-    - "{{ nginx_sites_available_dir }}"
-    - "{{ nginx_sites_enabled_dir }}"
+    - { path: '{{ nginx_app_dir }}', owner: '{{ nginx_user }}', group: '{{ common_web_group }}' }
+    - { path: '{{ nginx_sites_available_dir }}', owner: '{{ nginx_user }}', group: '{{ common_web_group }}' }
+    - { path: '{{ nginx_sites_enabled_dir }}', owner: '{{ nginx_user }}', group: '{{ common_web_group }}' }
+    - { path: '{{ nginx_conf_dir }}', owner: '{{ nginx_user }}', group: '{{ common_web_group }}' }
+    - { path: '{{ nginx_data_dir }}', owner: '{{ common_web_user }}', group: '{{ nginx_user }}' }
+    - { path: '{{ nginx_log_dir }}', owner: '{{ common_web_user }}', group: '{{ nginx_user }}' }
+    - { path: '{{ nginx_server_static_dir }}', owner: '{{ common_web_user }}', group: '{{ nginx_user }}' }
+    - { path: '{{ nginx_server_cache_dir }}', owner: '{{ common_web_user }}', group: '{{ nginx_user }}' }
   notify: restart nginx
+  tags:
+    - install
+    - install:configuration
 
-- name: create nginx data dirs
-  file: >
-    path="{{ item }}"
-    state=directory
-    owner="{{ common_web_user }}"
-    group="{{ nginx_user }}"
-  with_items:
-    - "{{ nginx_data_dir }}"
-    - "{{ nginx_log_dir }}"
+- name: Install needed packages
+  apt:
+    name: "{{ nginx_debian_pkgs }}"
+    state: present
+
update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + notify: restart nginx + tags: + - install + - install:system-requirements + +- name: Add apt key + apt_key: + url: "http://nginx.org/keys/nginx_signing.key" + state: present notify: restart nginx + tags: + - install + - install:system-requirements -- name: Install nginx packages - apt: pkg={{','.join(nginx_debian_pkgs)}} state=present +- name: Add nginx repository + apt_repository: + repo: "{{ NGINX_APT_REPO }}" + state: present + update_cache: yes + register: add_repo + until: add_repo is success + retries: 10 + delay: 5 notify: restart nginx + tags: + - install + - install:system-requirements -- name: Server configuration file - template: > - src=nginx.conf.j2 dest=/etc/nginx/nginx.conf - owner=root group={{ common_web_user }} mode=0644 +# REMOVE THIS AFTER LATEST NGINX HAS BEEN DEPLOYED EVERYWHERE +# New package does not identify conflicts properly. +# "nginx-common" only appears as requirement for ubuntu-distributed package, thus +# removing it will remove all nginx packages installed from Ubuntu's repo. +# This is only required if nginx was previously installed from Ubuntu's repo +# and you're switching to Nginx's PPA +- name: Remove old nginx packages + apt: + name: nginx-common + state: absent + tags: + - install + - install:system-requirements + +- name: Install the nginx package + apt: + name: nginx + state: latest + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + notify: restart nginx + tags: + - install + - install:system-requirements + +- name: Remove the default site + file: + path: /etc/nginx/sites-enabled/default + state: absent notify: reload nginx + tags: + - install + - install:configuration + +- name: Define server configuration file and common nginx configuration + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: root + group: "{{ item.group }}" + mode: "{{ item.mode }}" + with_items: + - { src: 'etc/nginx/nginx.conf.j2', dest: '/etc/nginx/nginx.conf', group: '{{ common_web_user }}', mode: "0644" } + - { src: 'edx/app/nginx/sites-available/maps.j2', dest: '{{ nginx_sites_available_dir }}/maps', group: 'root', mode: "0600" } + notify: restart nginx + tags: + - install + - install:configuration -- name: Creating common nginx configuration - template: > - src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release - owner=root group=root mode=0600 +- name: Create robot rules + template: + src: "edx/app/nginx/robots.txt.j2" + dest: "{{ nginx_app_dir }}/robots.txt" + owner: root + group: "{{ common_web_user }}" + mode: 0644 notify: reload nginx + tags: + - install + - install:configuration - name: Creating link for common nginx configuration - file: > - src={{ nginx_sites_available_dir }}/edx-release - dest={{ nginx_sites_enabled_dir }}/edx-release - state=link owner=root group=root + file: + src: "{{ nginx_sites_available_dir }}/{{ item }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ item }}" + state: link + owner: root + group: root notify: reload nginx + with_items: + - "maps" + tags: + - install + - install:configuration - name: Copying nginx configs for {{ nginx_sites }} - template: > - src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }} - owner=root group={{ common_web_user }} mode=0640 + template: + src: "{{ nginx_template_dir }}/{{ item }}.j2" + dest: "{{ nginx_sites_available_dir }}/{{ item }}" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + 
with_items: "{{ nginx_sites }}" notify: reload nginx - with_items: nginx_sites + tags: + - install + - install:configuration - name: Creating nginx config links for {{ nginx_sites }} - file: > - src={{ nginx_sites_available_dir }}/{{ item }} - dest={{ nginx_sites_enabled_dir }}/{{ item }} - state=link owner=root group=root + file: + src: "{{ nginx_sites_available_dir }}/{{ item }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ item }}" + state: link + owner: root + group: root + with_items: "{{ nginx_sites }}" + when: not nginx_skip_enable_sites + notify: reload nginx + tags: + - install + - install:configuration + +- name: Copying nginx extra configs + template: + src: "{{ item }}" + dest: "{{ nginx_sites_available_dir }}/{{ (item | basename).rstrip('.j2') }}" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + with_items: "{{ nginx_extra_sites }}" + notify: reload nginx + tags: + - install + - install:configuration + +- name: Creating links for nginx extra configs + file: + src: "{{ nginx_sites_available_dir }}/{{ (item | basename).rstrip('.j2') }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ (item | basename).rstrip('.j2') }}" + state: link + owner: root + group: root + with_items: "{{ nginx_extra_sites }}" + notify: reload nginx + tags: + - install + - install:configuration + +- name: Copying custom nginx config + template: + src: "{{ item }}" + dest: "{{ nginx_conf_dir }}/{{ (item | basename).rstrip('.j2') }}" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + # Ansible 2.4 changed follow symlinks default to "no". We need this for edx-east symlink + # https://docs.ansible.com/ansible/latest/modules/template_module.html + follow: yes + with_items: "{{ nginx_extra_configs }}" notify: reload nginx - with_items: nginx_sites + tags: + - install + - install:configuration + +- name: Copying nginx redirect configs for {{ nginx_redirects }} + template: + src: "{{ nginx_template_dir }}/nginx_redirect.j2" + dest: "{{ nginx_sites_available_dir }}/{{ item.key }}" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + with_dict: "{{ nginx_redirects }}" + notify: reload nginx + tags: + - install + - install:configuration + +- name: Creating nginx redirect links for {{ nginx_redirects }} + file: + src: "{{ nginx_sites_available_dir }}/{{ item.key }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ item.key }}" + state: link + owner: root + group: root + with_dict: "{{ nginx_redirects }}" + notify: reload nginx + tags: + - install + - install:configuration + + # These are static pages that can be used + # for nginx rate limiting, 500 errors, etc. 
+ +- name: Create NGINX server templates + template: + src: "{{ NGINX_SERVER_HTML_FILES_TEMPLATE }}" + dest: "{{ nginx_server_static_dir }}/{{ item.file }}" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + with_items: "{{ NGINX_SERVER_HTML_FILES }}" + tags: + - install + - install:configuration + +- name: Copy static files + copy: + src: "{{ item }}" + dest: "{{ nginx_server_static_dir }}" + owner: "{{ common_web_user }}" + group: "{{ common_web_user }}" + mode: "0640" + with_items: "{{ NGINX_SERVER_STATIC_FILES }}" + tags: + - install + - install:configuration - name: Write out htpasswd file - htpasswd: > - name={{ NGINX_HTPASSWD_USER }} - password={{ NGINX_HTPASSWD_PASS }} - path={{ nginx_htpasswd_file }} - when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS + htpasswd: + name: "{{ item.name }}" + password: "{{ item.password }}" + state: "{{ item.state }}" + path: "{{ nginx_htpasswd_file }}" + with_items: "{{ NGINX_USERS }}" + when: NGINX_CREATE_HTPASSWD_FILE + no_log: True + tags: + - install + - install:configuration + +- name: Write out htpasswd file for prospectus preview pages + htpasswd: + name: "{{ item.name }}" + password: "{{ item.password }}" + state: "{{ item.state }}" + path: "{{ nginx_htpasswd_file }}" + with_items: "{{ PROSPECTUS_PREVIEW_NGINX_USERS }}" + when: PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH + tags: + - install + - install:configuration - name: Create nginx log file location (just in case) - file: > - path={{ nginx_log_dir}} state=directory - owner={{ common_web_user }} group={{ common_web_user }} + file: + path: "{{ nginx_log_dir}}" + state: directory + owner: "{{ common_web_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration # Check to see if the ssl cert/key exists before copying. 
# This extra check is done to prevent failures when # ansible-playbook is run locally -- stat: path={{ NGINX_SSL_CERTIFICATE }} +- local_action: + module: stat + path: "{{ NGINX_SSL_CERTIFICATE }}" + become: False + connection: local register: ssl_cert + tags: + - install + - install:configuration -- stat: path={{ NGINX_SSL_KEY }} +- local_action: + module: stat + path: "{{ NGINX_SSL_KEY }}" + become: False + connection: local register: ssl_key + tags: + - install + - install:configuration - name: copy ssl cert - copy: > - src={{ NGINX_SSL_CERTIFICATE }} - dest=/etc/ssl/certs/ - owner=root group=root mode=0644 + copy: + src: "{{ NGINX_SSL_CERTIFICATE }}" + dest: "/etc/ssl/certs/" + owner: root + group: root + mode: 0644 when: ssl_cert.stat.exists and NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem' + tags: + - install + - install:configuration - name: copy ssl key - copy: > - src={{ NGINX_SSL_KEY }} - dest=/etc/ssl/private/ - owner=root group=root mode=0640 + copy: + src: "{{ NGINX_SSL_KEY }}" + dest: "/etc/ssl/private/" + owner: root + group: root + mode: 0640 when: ssl_key.stat.exists and NGINX_ENABLE_SSL and NGINX_SSL_KEY != 'ssl-cert-snakeoil.key' + no_log: True + tags: + - install + - install:configuration -# removing default link -- name: Removing default nginx config and restart (enabled) - file: path={{ nginx_sites_enabled_dir }}/default state=absent - notify: reload nginx - -# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good +- name: copy ssl cert from variable + copy: + content: "{{ NGINX_SSL_CERT_VAR }}" + dest: "/etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}" + owner: root + group: root + mode: 0644 + when: + - not ssl_cert.stat.exists and NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem' + - NGINX_SSL_CERT_VAR is defined + tags: + - install + - install:configuration -- name: Set up nginx access log rotation - template: > - dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2 - owner=root group=root mode=644 +- name: copy ssl key from variable + copy: + content: "{{ NGINX_SSL_KEY_VAR }}" + dest: "/etc/ssl/private/{{ NGINX_SSL_KEY|basename }}" + owner: root + group: root + mode: 0640 + when: + - not ssl_key.stat.exists and NGINX_ENABLE_SSL and NGINX_SSL_KEY != 'ssl-cert-snakeoil.key' + - NGINX_SSL_KEY_VAR is defined + no_log: True + tags: + - install + - install:configuration # removing default link - name: Removing default nginx config and restart (enabled) - file: path={{ nginx_sites_enabled_dir }}/default state=absent + file: + path: "{{ nginx_sites_enabled_dir }}/default" + state: absent notify: reload nginx + tags: + - install + - install:configuration # Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good - name: Set up nginx access log rotation - template: > - dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2 - owner=root group=root mode=644 + template: + src: "etc/logrotate.d/edx_logrotate_nginx_access.j2" + dest: "/etc/logrotate.d/nginx-access" + owner: root + group: root + mode: 0644 + tags: + - install + - install:configuration -- name: Set up nginx access log rotation - template: > - dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2 - owner=root group=root mode=644 +- name: Set up nginx error log rotation + template: + src: "etc/logrotate.d/edx_logrotate_nginx_error.j2" + dest: "/etc/logrotate.d/nginx-error" + owner: root + group: root + mode: 0644 + tags: + - 
install + - install:configuration + +# Test the nginx configs before restarting nginx so that any errors are visible and not hidden in +# the service logs. +- name: Test nginx configs + command: nginx -t + tags: + - install + - install:configuration + +# nginx is started during package installation, before any of the configuration files are in place. +# The handler that reloads the configuration would be run only at the very end of the playbook, so +# none of the local services would be available in the meantime, e.g. causing certs to error out +# since it can't reach xqueue on its nginx port. For this reason, we flush the handlers here +# to ensure the nginx configuration is reloaded when necessary. +- name: Restart or reload nginx if necessary + meta: flush_handlers + tags: + - install + - install:configuration # If tasks that notify restart nginx don't change the state of the remote system # their corresponding notifications don't get run. If nginx has been stopped for # any reason, this will ensure that it is started up again. - name: make sure nginx has started - service: name=nginx state=started + service: + name: nginx + state: started + tags: + - manage + - manage:start diff --git a/playbooks/roles/nginx/templates/basic-auth.j2 b/playbooks/roles/nginx/templates/basic-auth.j2 deleted file mode 100644 index 840f4ff2315..00000000000 --- a/playbooks/roles/nginx/templates/basic-auth.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{% if NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS %} - satisfy any; - - allow 127.0.0.1; - deny all; - - auth_basic "Restricted"; - auth_basic_user_file {{ nginx_htpasswd_file }}; - - index index.html - proxy_set_header X-Forwarded-Proto https; -{% endif %} diff --git a/playbooks/roles/nginx/templates/cms.j2 b/playbooks/roles/nginx/templates/cms.j2 deleted file mode 100644 index 1a0a8c41266..00000000000 --- a/playbooks/roles/nginx/templates/cms.j2 +++ /dev/null @@ -1,107 +0,0 @@ -{%- if "cms" in nginx_default_sites -%} - {%- set default_site = "default" -%} -{%- else -%} - {%- set default_site = "" -%} -{%- endif -%} - -upstream cms-backend { - {% for host in nginx_cms_gunicorn_hosts %} - server {{ host }}:{{ edxapp_cms_gunicorn_port }} fail_timeout=0; - {% endfor %} -} - -server { - # CMS configuration file for nginx, templated by ansible - - {% if NGINX_ENABLE_SSL %} - - listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}}; - listen {{EDXAPP_CMS_SSL_NGINX_PORT}} ssl; - - ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; - ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; - - {% else %} - listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}}; - {% endif %} - - - server_name ~^((stage|prod)-)?studio\..*; - - access_log {{ nginx_log_dir }}/access.log; - error_log {{ nginx_log_dir }}/error.log error; - - # CS184 requires uploads of up to 4MB for submitting screenshots. - # CMS requires larger value for course assest, values provided - # via hiera. 
-    client_max_body_size 100M;
-
-    rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
-
-
-    location @proxy_to_cms_app {
-        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
-        proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
-        proxy_set_header X-Forwarded-For $http_x_forwarded_for;
-        proxy_set_header Host $http_host;
-
-        proxy_redirect off;
-        proxy_pass http://cms-backend;
-    }
-
-    location / {
-        {% include "basic-auth.j2" %}
-        try_files $uri @proxy_to_cms_app;
-    }
-
-    # No basic auth security on the github_service_hook url, so that github can use it for cms
-    location /github_service_hook {
-        try_files $uri @proxy_to_cms_app;
-    }
-
-    # No basic auth security on the heartbeat url, so that ELB can use it
-    location /heartbeat {
-        try_files $uri @proxy_to_cms_app;
-    }
-
-    # Check security on this
-    location ~ /static/(?P<file>.*) {
-        root {{ edxapp_data_dir }};
-        try_files /staticfiles/$file /course_static/$file =404;
-
-        # return a 403 for static files that shouldn't be
-        # in the staticfiles directory
-        location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
-            return 403;
-        }
-
-        # http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
-        location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
-            expires max;
-            add_header Access-Control-Allow-Origin *;
-            try_files /staticfiles/$collected /course_static/$collected =404;
-        }
-
-        # Set django-pipelined files to maximum cache time
-        location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
-            expires max;
-            # Without this try_files, files that have been run through
-            # django-pipeline return 404s
-            try_files /staticfiles/$collected /course_static/$collected =404;
-        }
-
-        # Expire other static files immediately (there should be very few / none of these)
-        expires epoch;
-    }
-
-    # Forward to HTTPS if we're an HTTP request...
-    if ($http_x_forwarded_proto = "http") {
-        set $do_redirect "true";
-    }
-
-    # Run our actual redirect...
-    if ($do_redirect = "true") {
-        rewrite ^ https://$host$request_uri?
permanent; - } - -} diff --git a/playbooks/roles/nginx/templates/devpi.j2 b/playbooks/roles/nginx/templates/devpi.j2 deleted file mode 100644 index b99c9666d67..00000000000 --- a/playbooks/roles/nginx/templates/devpi.j2 +++ /dev/null @@ -1,16 +0,0 @@ -server { - listen {{ devpi_nginx_port }}; - server_name {{ devpi_server_name }}; - gzip on; - gzip_min_length 2000; - gzip_proxied any; - gzip_types text/html application/json; - - location / { - root {{ devpi_data_dir }}; - proxy_pass http://localhost:{{ devpi_port }}; - proxy_set_header X-outside-url $scheme://$host; - proxy_set_header X-Real-IP $remote_addr; - } -} - diff --git a/playbooks/roles/nginx/templates/discern.j2 b/playbooks/roles/nginx/templates/discern.j2 deleted file mode 100644 index b38c1684c70..00000000000 --- a/playbooks/roles/nginx/templates/discern.j2 +++ /dev/null @@ -1,32 +0,0 @@ -server { - listen {{ DISCERN_NGINX_PORT }}; - server_name localhost; - - set $my_host $http_host; - if ($host ~ "\d+\.\d+\.\d+\.\d+") { - set $my_host "127.0.0.1"; - } - - # https://docs.djangoproject.com/en/dev/howto/static-files/#serving-static-files-in-production - location /static/ { # STATIC_URL - alias {{ discern_app_dir }}/staticfiles/; - expires 1m; - autoindex on; - } - - location /media/ { # MEDIA_URL - alias /home/www/myhostname/static/; # MEDIA_ROOT - expires 30d; - } - - location / { - {% include "basic-auth.j2" %} - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $my_host; - - proxy_pass http://127.0.0.1:7999; - } -} - diff --git a/playbooks/roles/nginx/templates/edx-release.j2 b/playbooks/roles/nginx/templates/edx-release.j2 deleted file mode 100644 index 8ec09d0f20a..00000000000 --- a/playbooks/roles/nginx/templates/edx-release.j2 +++ /dev/null @@ -1,11 +0,0 @@ -server { - listen 8099 default_server; - - location = /versions.html { - alias {{ nginx_cfg.version_html }}; - } - - location = /versions.json { - alias {{ nginx_cfg.version_json }}; - } -} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 new file mode 100644 index 00000000000..d25fbe499b0 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 @@ -0,0 +1,28 @@ +{% for item in NGINX_ROBOT_RULES %} +{% if item.agent is string %} +User-agent: {{ item.agent }} +{% else %} +{% for agent in item.agent %} +User-agent: {{ agent }} +{% endfor %} +{% endif %} +{% if item.allow is defined %} +{% if item.allow is string %} +Allow: {{ item.allow }} +{% else %} +{% for allow in item.allow %} +Allow: {{ allow }} +{% endfor %} +{% endif %} +{% endif %} +{% if item.disallow is defined %} +{% if item.disallow is string %} +Disallow: {{ item.disallow }} +{% else %} +{% for disallow in item.disallow %} +Disallow: {{ disallow }} +{% endfor %} +{% endif %} +{% endif %} + +{% endfor %} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/analytics_api.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/analytics_api.j2 new file mode 100644 index 00000000000..ccafcc823aa --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/analytics_api.j2 @@ -0,0 +1,64 @@ +upstream analytics_api_app_server { + {% for host in nginx_analytics_api_gunicorn_hosts %} + server {{ host }}:{{ analytics_api_gunicorn_port }} fail_timeout=0; + {% endfor %} +} + +server { + listen {{ 
ANALYTICS_API_NGINX_PORT }} default_server;
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ ANALYTICS_API_NGINX_PORT }} default_server;
+  {% endif %}
+
+  # Nginx does not support nested conditions or OR conditions, so
+  # there is an unfortunate mix of conditionals here.
+  {% if NGINX_REDIRECT_TO_HTTPS %}
+    {% if NGINX_HTTPS_REDIRECT_STRATEGY == "scheme" %}
+  # Redirect http to https over single instance
+  if ($scheme != "https")
+  {
+    set $do_redirect_to_https "true";
+  }
+
+    {% elif NGINX_HTTPS_REDIRECT_STRATEGY == "forward_for_proto" %}
+
+  # Forward to HTTPS if we're an HTTP request... and the server is behind ELB
+  if ($http_x_forwarded_proto = "http")
+  {
+    set $do_redirect_to_https "true";
+  }
+    {% endif %}
+
+  # Execute the actual redirect
+  if ($do_redirect_to_https = "true")
+  {
+    return 301 https://$host$request_uri;
+  }
+  {% endif %}
+
+  location ~ ^/static/(?P<file>.*) {
+    root {{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }};
+    try_files /staticfiles/$file =404;
+  }
+
+  location / {
+    try_files $uri @proxy_to_app;
+  }
+
+  {% include "robots.j2" %}
+
+location @proxy_to_app {
+    proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+    proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
+    proxy_set_header X-Forwarded-For $http_x_forwarded_for;
+
+    # newrelic-specific header records the time when nginx handles a request.
+    proxy_set_header X-Queue-Start "t=${msec}";
+
+    proxy_set_header Host $http_host;
+
+    proxy_redirect off;
+    proxy_pass http://analytics_api_app_server;
+  }
+}
+
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/basic-auth.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/basic-auth.j2
new file mode 100644
index 00000000000..dc490fa3fbf
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/basic-auth.j2
@@ -0,0 +1,15 @@
+    satisfy any;
+
+    allow 127.0.0.1;
+
+    {% for cidr in COMMON_BASIC_AUTH_EXCEPTIONS %}
+    allow {{ cidr }};
+    {% endfor %}
+
+    deny all;
+
+    auth_basic "Restricted";
+    auth_basic_user_file {{ nginx_htpasswd_file }};
+
+    index index.html;
+    proxy_set_header X-Forwarded-Proto https;
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2
new file mode 100644
index 00000000000..c6616c38800
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2
@@ -0,0 +1,168 @@
+{%- if "cms" in nginx_default_sites -%}
+  {%- set default_site = "default_server" -%}
+{%- else -%}
+  {%- set default_site = "" -%}
+{%- endif -%}
+
+upstream cms-backend {
+  {% for host in nginx_cms_gunicorn_hosts %}
+    server {{ host }}:{{ edxapp_cms_gunicorn_port }} fail_timeout=0;
+  {% endfor %}
+}
+
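The basic-auth.j2 snippet above uses satisfy any, so a request is accepted if it either matches an allow rule or presents valid credentials; COMMON_BASIC_AUTH_EXCEPTIONS extends the allow list beyond 127.0.0.1. An illustrative override (both CIDRs are placeholders):

    COMMON_BASIC_AUTH_EXCEPTIONS:
      - 10.0.0.0/8       # hypothetical internal network
      - 192.168.0.0/16   # hypothetical VPN range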
+{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %}
+  # The Origin request header indicates where a fetch originates from. It doesn't include any path information,
+  # but only the server name (e.g. https://www.example.com).
+  # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details.
+  #
+  # Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one
+  # of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS.
+  map $http_origin $cors_origin {
+    default "null";
+    {% for host in EDXAPP_CORS_ORIGIN_WHITELIST %}
+    "~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin;
+    {% endfor %}
+  }
+{% endif %}
+
+
+server {
+  # CMS configuration file for nginx, templated by ansible
+
+  # Proxy to a remote maintenance page
+  {% if NGINX_EDXAPP_ENABLE_S3_MAINTENANCE %}
+
+  # Do not include a 502 error in NGINX_ERROR_PAGES when
+  # NGINX_EDXAPP_ENABLE_S3_MAINTENANCE is enabled.
+
+  # Return a 503 instead so that it passes through Cloudflare
+  error_page 502 =503 @maintenance;
+
+  {% include "s3_maintenance.j2" %}
+
+  {% endif %}
+
+  # error pages
+  {% for k, v in NGINX_EDXAPP_ERROR_PAGES.items() %}
+error_page {{ k }} {{ v }};
+  {% endfor %}
+
+  {% if NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS %}
+  proxy_intercept_errors on;
+  {% endif %}
+
+{% include "empty_json.j2" %}
+
+  listen {{ EDXAPP_CMS_NGINX_PORT }} {{ default_site }};
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ EDXAPP_CMS_NGINX_PORT }} {{ default_site }};
+  {% endif %}
+
+  {% if NGINX_ENABLE_SSL %}
+
+  listen {{ EDXAPP_CMS_SSL_NGINX_PORT }} ssl;
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ EDXAPP_CMS_SSL_NGINX_PORT }} ssl;
+  {% endif %}
+
+  ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
+  ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
+  {% endif %}
+
+  {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %}
+  # request the browser to use SSL for all connections
+  add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains";
+  {% endif %}
+
+  # prevent the browser from doing MIME-type sniffing
+  add_header X-Content-Type-Options nosniff;
+
+  # Prevent invalid display of courseware in IE 10+ with high privacy settings
+  add_header P3P '{{ NGINX_P3P_MESSAGE }}';
+
+  {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%}
+  # To track requests
+  add_header X-Request-ID $request_tracking_id;
+  {% endif %}
+
+  {% include "handle-tls-redirect-and-ip-disclosure.j2" %}
+
+  server_name {{ CMS_HOSTNAME }};
+
+  access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }};
+  error_log {{ nginx_log_dir }}/error.log error;
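For context on the error_page loop above: NGINX_EDXAPP_ERROR_PAGES (defined in the role defaults earlier in this diff) maps status codes to pages served out of nginx_server_static_dir, so individual codes can be pointed at custom pages. A hedged example; the 504 page name is hypothetical and would need a matching NGINX_SERVER_HTML_FILES entry:

    NGINX_EDXAPP_ERROR_PAGES:
      "500": "/server/server-error.html"
      "502": "/server/server-error.html"
      "504": "/server/timeout-error.html"  # hypothetical custom page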
+  # CS184 requires uploads of up to 4MB for submitting screenshots.
+  # CMS requires a larger value for course assets, values provided
+  # via hiera.
+  client_max_body_size {{ NGINX_CMS_CLIENT_MAX_BODY_SIZE }};
+  proxy_read_timeout {{ NGINX_CMS_PROXY_READ_TIMEOUT }};
+
+  rewrite ^(.*)/favicon.ico$ {{ NGINX_EDXAPP_FAVICON_PATH }} last;
+
+  {% include "python_lib.zip.j2" %}
+  {% include "common-settings.j2" %}
+
+  location @proxy_to_cms_app {
+{% include "cms_proxy.j2" %}
+  }
+
+  location @proxy_to_cms_app_api {
+    error_page 504 @empty_json;
+    error_page 502 @empty_json;
+    error_page 500 @empty_json;
+
+{% include "cms_proxy.j2" %}
+  }
+
+  location / {
+    {% if EDXAPP_CMS_ENABLE_BASIC_AUTH|bool %}
+      {% include "basic-auth.j2" %}
+    {% endif %}
+    try_files $uri @proxy_to_cms_app;
+  }
+
+  # No basic auth security on the github_service_hook url, so that github can use it for cms
+  location /github_service_hook {
+    try_files $uri @proxy_to_cms_app;
+  }
+
+  # No basic auth security on the heartbeat url, so that ELB can use it
+  location /heartbeat {
+    # If /edx/var/nginx/server-static/maintenance_heartbeat.txt exists serve an
+    # empty 200 so the instance stays in the load balancer to serve the
+    # maintenance page
+    if (-f /edx/var/nginx/server-static/maintenance_heartbeat.txt) {
+      return 200;
+    }
+    try_files $uri @proxy_to_cms_app;
+  }
+
+  # The API is accessed using OAuth2, which
+  # uses the Authorization header, so we can't have
+  # basic auth on it as well.
+  location /api {
+    try_files $uri @proxy_to_cms_app_api;
+  }
+
+location ~ ^{{ EDXAPP_MEDIA_URL }}/(?P<file>.*) {
+  root {{ edxapp_media_dir }};
+  try_files /$file =404;
+}
+
+{% if NGINX_ADMIN_ACCESS_CIDRS and EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION %}
+  location /admin {
+    real_ip_header X-Forwarded-For;
+    set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }};
+    {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %}
+    allow {{ cidr }};
+    {% endfor %}
+    deny all;
+    try_files $uri @proxy_to_cms_app;
+  }
+{% endif %}
+
+  {% include "robots.j2" %}
+  {% include "static-files.j2" %}
+
+}
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms_proxy.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms_proxy.j2
new file mode 100644
index 00000000000..664f9f5ee15
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms_proxy.j2
@@ -0,0 +1,40 @@
+{% if NGINX_SET_X_FORWARDED_HEADERS %}
+    proxy_set_header X-Forwarded-Proto $scheme;
+    proxy_set_header X-Forwarded-Port $server_port;
+    proxy_set_header X-Forwarded-For $remote_addr;
+    {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%}
+    proxy_set_header X-Request-ID $request_tracking_id;
+    {% endif %}
+{% else %}
+    proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+    proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
+    proxy_set_header X-Forwarded-For $http_x_forwarded_for;
+    {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%}
+    proxy_set_header X-Request-ID $request_tracking_id;
+    {% endif %}
+{% endif %}
+
+    # newrelic-specific header records the time when nginx handles a request.
+ proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://cms-backend; +{% if NGINX_CMS_PROXY_CONNECT_TIMEOUT %} + proxy_connect_timeout {{ NGINX_CMS_PROXY_CONNECT_TIMEOUT }}; +{% endif %} +{% if NGINX_CMS_PROXY_SEND_TIMEOUT %} + proxy_send_timeout {{ NGINX_CMS_PROXY_SEND_TIMEOUT }}; +{% endif %} +{% if NGINX_CMS_PROXY_READ_TIMEOUT %} + proxy_read_timeout {{ NGINX_CMS_PROXY_READ_TIMEOUT }}; +{% endif %} + +{% if EDXAPP_SET_PROXY_BUFFER_SIZE %} + proxy_buffer_size {{EDXAPP_PROXY_BUFFER_SIZE}}; + proxy_buffers {{EDXAPP_PROXY_BUFFERS_NUMBER}} {{EDXAPP_PROXY_BUFFERS_SIZE}}; + proxy_busy_buffers_size {{EDXAPP_PROXY_BUSY_BUFFERS_SIZE}}; +{% endif %} + +{{ NGINX_EDXAPP_CMS_APP_EXTRA }} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/common-settings.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/common-settings.j2 new file mode 100644 index 00000000000..962a42589a4 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/common-settings.j2 @@ -0,0 +1,8 @@ + +# Common settings used across nginx configurations + +# Disables server version feedback on pages and in headers +server_tokens off; + +# Increase accepted header size to account for overenthusiastic usage of cookies +large_client_header_buffers 8 16k; diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/conductor.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/conductor.j2 new file mode 100644 index 00000000000..5c193f0e1a8 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/conductor.j2 @@ -0,0 +1,68 @@ +{%- if "conductor" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + # Conductor configuration file for nginx, templated by ansible + + {% if NGINX_CONDUCTOR_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + + # Catches 404s from S3 and returns the default nginx 404 page instead + error_page 404 @error404; + + location @error404 { + return 404; + } + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + + {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + {% endif %} + + listen {{ CONDUCTOR_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ CONDUCTOR_NGINX_PORT }} {{ default_site }}; + {% endif %} + + # Redirects using the client port instead of the port the service is running + # on. This prevents redirects to the local 8000 port. + port_in_redirect off; + + {% if CONDUCTOR_REDIRECT_ROOT %} + location = / { + return 302 {{ CONDUCTOR_ROOT_REDIRECT_PATH }}; + } + {% endif %} + + # CONDUCTOR_STATIC_SITES will be a list of dictionaries which have a: + # - router_path: The path you will go to on the router to access the content + # - proxied_path: The path to proxy the requests to + {% for static_site in CONDUCTOR_STATIC_SITES %} + + # Matches: // + location = /{{ static_site.router_path }}/ { + proxy_pass {{ static_site.proxied_path }}/index.html; + } + + # Matches: //[.../] + location ~ ^/{{ static_site.router_path }}/((?:[\w\-]+\/+)*)([\w\-\.]+\.[\w\-\.]+) { + proxy_pass {{ static_site.proxied_path }}/$1$2; + } + + # Matches: ///[.../] + location ~ ^/{{ static_site.router_path }}/([a-z0-9-]+)[/]? 
{
+    proxy_pass {{ static_site.proxied_path }}/$1/index.html;
+  }
+
+  {% endfor %}
+
+  location /HealthCheck {
+    return 200;
+  }
+}
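Tying the conductor comment above together: each CONDUCTOR_STATIC_SITES entry maps a router path to a proxied origin, and the three location blocks cover the site index, nested files, and trailing-slash subpages respectively. A sketch with placeholder values:

    CONDUCTOR_STATIC_SITES:
      - router_path: "press"                                      # hypothetical path on the router
        proxied_path: "https://example-bucket.s3.amazonaws.com/press"   # hypothetical origin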
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/credentials.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/credentials.j2
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_exams.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_exams.j2
new file mode 100644
index 00000000000..b754bcb7497
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_exams.j2
@@ -0,0 +1,30 @@
+server {
+  server_name ~^((stage|prod)-)?edx-exams.*;
+  listen 80;
+  rewrite ^ https://$host$request_uri? permanent;
+}
+server {
+  server_name ~^((stage|prod)-)?edx-exams.*;
+  listen 443 ssl;
+  ssl_certificate /etc/ssl/certs/wildcard.sandbox.edx.org.pem;
+  ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key;
+
+  location / {
+    try_files $uri @proxy_to_app;
+  }
+  location ~ ^/(api)/ {
+    try_files $uri @proxy_to_app;
+  }
+  location @proxy_to_app {
+    proxy_set_header X-Forwarded-Proto $scheme;
+    proxy_set_header X-Forwarded-Port $server_port;
+    proxy_set_header X-Forwarded-For $remote_addr;
+    proxy_set_header Host $http_host;
+    proxy_redirect off;
+    proxy_pass http://127.0.0.1:18740;
+  }
+  location ~ ^/static/(?P<file>.*) {
+    root /edx/var/edx_exams;
+    try_files /staticfiles/$file =404;
+  }
+}
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2
new file mode 100644
index 00000000000..5f94acdbb98
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2
@@ -0,0 +1,63 @@
+{%- if "edx_notes_api" in nginx_default_sites -%}
+  {%- set default_site = "default_server" -%}
+{%- else -%}
+  {%- set default_site = "" -%}
+{%- endif -%}
+
+upstream {{ edx_notes_api_service_name }}_app_server {
+  {% for host in nginx_edx_notes_api_gunicorn_hosts %}
+    server {{ host }}:{{ edx_notes_api_gunicorn_port }} fail_timeout=0;
+  {% endfor %}
+}
+
+server {
+  listen {{ edx_notes_api_nginx_port }} {{ default_site }};
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ edx_notes_api_nginx_port }} {{ default_site }};
+  {% endif %}
+
+  {% if NGINX_ENABLE_SSL %}
+
+  listen {{ edx_notes_api_ssl_nginx_port }} ssl;
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ edx_notes_api_ssl_nginx_port }} ssl;
+  {% endif %}
+
+  ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
+  ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
+  {% endif %}
+
+  {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %}
+  # request the browser to use SSL for all connections
+  add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains";
+  {% endif %}
+
+  {% include "common-settings.j2" %}
+
+  # Prevent invalid display of courseware in IE 10+ with high privacy settings
+  add_header P3P '{{ NGINX_P3P_MESSAGE }}';
+
+  {% include "handle-tls-redirect-and-ip-disclosure.j2" %}
+
+  server_name {{ EDX_NOTES_API_HOSTNAME }};
+
+  location / {
+    try_files $uri @proxy_to_app;
+  }
+
+  {% include "robots.j2" %}
+
+location @proxy_to_app {
+    proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+    proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
+    proxy_set_header X-Forwarded-For $http_x_forwarded_for;
+
+    # newrelic-specific header records the time when nginx handles a request.
+    proxy_set_header X-Queue-Start "t=${msec}";
+
+    proxy_set_header Host $http_host;
+
+    proxy_redirect off;
+    proxy_pass http://{{ edx_notes_api_service_name }}_app_server;
+  }
+}
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/empty_json.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/empty_json.j2
new file mode 100644
index 00000000000..572876d76a1
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/empty_json.j2
@@ -0,0 +1,9 @@
+  location @empty_json {
+    # This location will return an empty body with content-type application/json
+    # If this location is referenced by the error_page directive the
+    # response code will be the error response code (i.e. 502), not 200
+    # despite the "return 200" directive
+    default_type application/json;
+    return 200;
+  }
+
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/extra_locations_lms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/extra_locations_lms.j2
new file mode 100644
index 00000000000..2cbd6fa017f
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/extra_locations_lms.j2
@@ -0,0 +1,11 @@
+{% if EDXAPP_SCORM_PKG_STORAGE_DIR %}
+  location ~ ^/{{ EDXAPP_MEDIA_URL }}/{{ EDXAPP_SCORM_PKG_STORAGE_DIR }}/(?P<file>.*) {
+    add_header 'Access-Control-Allow-Origin' $cors_origin;
+    add_header 'Access-Control-Allow-Credentials' 'true';
+    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
+
+    root {{ edxapp_media_dir }}/{{ EDXAPP_SCORM_PKG_STORAGE_DIR }};
+    try_files /$file =404;
+    expires 604800s;
+  }
+{% endif %}
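The block above renders only when EDXAPP_SCORM_PKG_STORAGE_DIR is set; unpacked SCORM packages are then served directly from the media root with CORS headers and a one-week (604800s) cache lifetime. A minimal sketch, assuming a hypothetical directory name:

    EDXAPP_SCORM_PKG_STORAGE_DIR: "scorm"  # hypothetical; must match the edxapp SCORM storage directory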
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/forum.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/forum.j2
new file mode 100644
index 00000000000..718a6e60211
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/forum.j2
@@ -0,0 +1,68 @@
+#
+# {{ ansible_managed }}
+#
+{# This prevents the injected comment from eating the server
+   directive. There's probably a better way of doing this,
+   but I don't know it currently.
+#}
+{% raw %}
+
+{% endraw %}
+
+{%- if "forum" in nginx_default_sites -%}
+  {%- set default_site = "default_server" -%}
+{%- else -%}
+  {%- set default_site = "" -%}
+{%- endif -%}
+
+{% if devstack %}
+{# Connects to WEBrick on port 4567 typically. Appropriate for development deployments #}
+
+upstream forum_app_server {
+    server localhost:{{ forum_unicorn_port }} fail_timeout=0;
+}
+{% else %}
+{# Connects to unicorn over a unix socket. Appropriate for production deployments #}
+
+upstream forum_app_server {
+    server unix:{{ forum_data_dir }}/forum.sock fail_timeout=0;
+}
+{% endif %}
+
+server {
+
+  server_name forum.*;
+  listen {{ FORUM_NGINX_PORT }} {{ default_site }};
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ FORUM_NGINX_PORT }} {{ default_site }};
+  {% endif %}
+  client_max_body_size {{ NGINX_FORUM_CLIENT_MAX_BODY_SIZE }};
+  proxy_read_timeout {{ NGINX_FORUM_PROXY_READ_TIMEOUT }};
+  keepalive_timeout 5;
+
+  location / {
+    try_files $uri @proxy_to_app;
+  }
+
+  {% include "robots.j2" %}
+
+location @proxy_to_app {
+  {% if NGINX_SET_X_FORWARDED_HEADERS %}
+    proxy_set_header X-Forwarded-Proto $scheme;
+    proxy_set_header X-Forwarded-Port $server_port;
+    proxy_set_header X-Forwarded-For $remote_addr;
+  {% else %}
+    proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+    proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
+    proxy_set_header X-Forwarded-For $http_x_forwarded_for;
+  {% endif %}
+
+    # newrelic-specific header records the time when nginx handles a request.
+    proxy_set_header X-Queue-Start "t=${msec}";
+
+    proxy_set_header Host $http_host;
+
+    proxy_redirect off;
+    proxy_pass http://forum_app_server;
+  }
+}
diff --git a/playbooks/roles/nginx/templates/gh_mirror.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/gh_mirror.j2
similarity index 86%
rename from playbooks/roles/nginx/templates/gh_mirror.j2
rename to playbooks/roles/nginx/templates/edx/app/nginx/sites-available/gh_mirror.j2
index 4449ae02e7d..845992f8704 100644
--- a/playbooks/roles/nginx/templates/gh_mirror.j2
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/gh_mirror.j2
@@ -1,5 +1,8 @@
 server {
   listen {{ gh_mirror_nginx_port }};
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ gh_mirror_nginx_port }};
+  {% endif %}
   server_name {{ gh_mirror_server_name }};
   location ~ (/.*) {
     root {{ gh_mirror_data_dir }};
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/gitreload.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/gitreload.j2
new file mode 100644
index 00000000000..86ab6d4e26e
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/gitreload.j2
@@ -0,0 +1,38 @@
+upstream gitreload_app_server {
+  {% for host in nginx_gitreload_gunicorn_hosts %}
+    server {{ host }}:{{ gitreload_gunicorn_port }} fail_timeout=0;
+  {% endfor %}
+}
+
+server {
+  listen {{ GITRELOAD_NGINX_PORT }} default_server;
+  {% if NGINX_ENABLE_IPV6 %}
+  listen [::]:{{ GITRELOAD_NGINX_PORT }} default_server;
+  {% endif %}
+
+  location / {
+    auth_basic "Restricted";
+    auth_basic_user_file {{ nginx_htpasswd_file }};
+
+    try_files $uri @proxy_to_app;
+  }
+
+  # No basic auth security on the queue status url, so that it can
+  # be checked easily
+  location /queue {
+    try_files $uri @proxy_to_app;
+  }
+
+  {% include "robots.j2" %}
+
+location @proxy_to_app {
+    proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+    proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
+    proxy_set_header X-Forwarded-For $http_x_forwarded_for;
+    proxy_set_header Host $http_host;
+
+    proxy_redirect off;
+    proxy_pass http://gitreload_app_server;
+  }
+}
+
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/grafana.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/grafana.j2
new file mode 100644
index 00000000000..33cb6a1a38c
--- /dev/null
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/grafana.j2
@@ -0,0 +1,47 @@
+{%- if "grafana" in
nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +# +# {{ ansible_managed }} +# + +upstream grafana_app_server { + server 127.0.0.1:3000 fail_timeout=0; +} + +server { + server_name grafana.*; + listen {{ GRAFANA_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ GRAFANA_NGINX_PORT }} {{ default_site }}; + {% endif %} + client_max_body_size 1M; + keepalive_timeout 5; + + location / { + try_files $uri @proxy_to_app; + } + + location @proxy_to_app { + {% if NGINX_SET_X_FORWARDED_HEADERS %} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; + {% else %} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + {% endif %} + + # newrelic-specific header records the time when nginx handles a request. + proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://grafana_app_server; + } +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/graphite.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/graphite.j2 new file mode 100644 index 00000000000..cd94520db48 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/graphite.j2 @@ -0,0 +1,54 @@ +# +# {{ ansible_managed }} +# +{# This prevents the injected comment from eating the server + directive. There's probably a better way of doing this, + but I don't know it currently. +#} +{% raw %} + +{% endraw %} + +{%- if "graphite" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +upstream graphite_app_server { + server unix:{{ graphite_root }}/run/graphite-api.sock fail_timeout=0; +} + +server { + server_name graphite.*; + listen {{ GRAPHITE_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ GRAPHITE_NGINX_PORT }} {{ default_site }}; + {% endif %} + client_max_body_size 1M; + keepalive_timeout 5; + + location / { + try_files $uri @proxy_to_app; + } + + location @proxy_to_app { + {% if NGINX_SET_X_FORWARDED_HEADERS %} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; + {% else %} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + {% endif %} + + # newrelic-specific header records the time when nginx handles a request. + proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://graphite_app_server; + } +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/handle-ip-disclosure.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/handle-ip-disclosure.j2 new file mode 100644 index 00000000000..f7267de637d --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/handle-ip-disclosure.j2 @@ -0,0 +1,12 @@ +# If you are changing this be warned that it lives in multiple places +# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS +# version of this in nginx and in edx_django_service role. 
+ +{% if NGINX_ALLOW_PRIVATE_IP_ACCESS %} +# This regexp matches only public IP addresses. +if ($host ~ "(\d+)(?.*) { + root {{ COMMON_DATA_DIR }}/{{ insights_service_name }}; + add_header Cache-Control "max-age=31536000"; + add_header 'Access-Control-Allow-Origin' $cors_origin; + add_header 'Access-Control-Allow-Methods' 'HEAD, GET, OPTIONS'; + + # Inform downstream caches to take certain headers into account when reading/writing to cache. + add_header 'Vary' 'Accept-Encoding,Origin'; + + try_files /staticfiles/$file =404; + } + + location / { + try_files $uri @proxy_to_app; + } + + # No basic auth security on the heartbeat url, so that ELB can use it + location /status { + try_files $uri @proxy_to_app; + } + + {% include "robots.j2" %} + +{% if NGINX_ADMIN_ACCESS_CIDRS and INSIGHTS_ENABLE_ADMIN_URLS_RESTRICTION %} + location ~ ^/(admin) { + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} + allow {{ cidr }}; + {% endfor %} + deny all; + try_files $uri @proxy_to_app; + } +{% endif %} + +location @proxy_to_app { + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + + # newrelic-specific header records the time when nginx handles a request. + proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://insights_app_server; + } + + # Prevent invalid display courseware in IE 10+ with high privacy settings + add_header P3P '{{ NGINX_P3P_MESSAGE }}'; + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 new file mode 100644 index 00000000000..d5769ff9f44 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 @@ -0,0 +1,34 @@ +server { + listen {{ jenkins_nginx_port }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ jenkins_nginx_port }}; + {% endif %} + server_name {{ jenkins_server_name }}; + {%- if jenkins_protocol_https %} + if ($http_x_forwarded_proto = "http") { + return 301 https://$host$request_uri; + } + {%- endif %} + location / { + proxy_pass http://localhost:{{ jenkins_port }}; + + {% if jenkins_protocol_https %} + # Rewrite HTTPS requests from WAN to HTTP requests on LAN + proxy_redirect http:// https://; + {% endif %} + + # The following settings from https://wiki.jenkins-ci.org/display/JENKINS/Running+Hudson+behind+Nginx + sendfile off; + + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_connect_timeout 150; + proxy_send_timeout 300; + proxy_read_timeout 300; + proxy_buffers 4 32k; + client_max_body_size 16m; + client_body_buffer_size 128k; + + } +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/kibana.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/kibana.j2 new file mode 100755 index 00000000000..c90fafc6ba1 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/kibana.j2 @@ -0,0 +1,81 @@ +{%- if "kibana" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +upstream elasticsearch_server { + server 127.0.0.1:9200; +} + +server { + # Kibana server, templated 
by ansible + + {% if NGINX_ENABLE_SSL %} + + listen {{ KIBANA_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ KIBANA_NGINX_PORT }} {{ default_site }}; + {% endif %} + listen {{ KIBANA_SSL_NGINX_PORT }} {{ default_site }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ KIBANA_SSL_NGINX_PORT }} {{ default_site }} ssl; + {% endif %} + + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + + {% else %} + listen {{ KIBANA_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ KIBANA_NGINX_PORT }} {{ default_site }}; + {% endif %} + {% endif %} + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + + server_name {{ KIBANA_SERVER_NAME }}; + + root {{ kibana_app_dir }}/htdocs; + + access_log {{ nginx_log_dir }}/kibana.access.log; + error_log {{ nginx_log_dir }}/kibana.error.log error; + + # Access restriction + {% if KIBANA_ENABLE_BASIC_AUTH|bool %} + {% include "basic-auth.j2" %} + {% endif %} + + # Set image format types to expire in a very long time + location ~* ^.+\.(jpg|jpeg|gif|png|ico)$ { + access_log off; + expires max; + } + + # Set css and js to expire in a very long time + location ~* ^.+\.(css|js)$ { + access_log off; + expires max; + } + + # Elastic Search + location /e { + rewrite /e/(.*) /$1 break; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + + proxy_redirect off; + proxy_pass http://elasticsearch_server; + } + + # Kibana + location / { + root {{ kibana_app_dir }}/htdocs; + index index.html; + expires 1d; + try_files $uri/ $uri; + if (-f $request_filename) { + break; + } + } +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/learner_portal.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/learner_portal.j2 new file mode 100644 index 00000000000..84c79da639e --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/learner_portal.j2 @@ -0,0 +1,32 @@ +{%- if "learner_portal" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + listen {{ LEARNER_PORTAL_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ LEARNER_PORTAL_NGINX_PORT }} {{ default_site }}; + {% endif %} + server_name ~^((stage|prod)-)?learner-portal.*; + location / { + root /edx/app/learner_portal/learner_portal/dist; + index index.html; + } +} + +server { + listen {{ LEARNER_PORTAL_SSL_NGINX_PORT }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ LEARNER_PORTAL_SSL_NGINX_PORT }} ssl; + {% endif %} + server_name ~^((stage|prod)-)?learner-portal.*; + ssl_certificate /etc/ssl/certs/wildcard.sandbox.edx.org.pem; + ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + location / { + root /edx/app/learner_portal/learner_portal/dist; + index index.html; + } +} \ No newline at end of file diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 new file mode 100644 index 00000000000..2dbc86f3212 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 @@ -0,0 +1,333 @@ +{%- if "lms" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} 
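The limit_req_zone definitions just below key off the edxapp session cookie and per-agent maps built from EDXAPP_RATE_LIMITED_USER_AGENTS, whose alias, agent_name, and rate fields are consumed by the template. An override shaped like the following drives them; the rates and the agent regex are placeholders, not values shipped by this change:

    EDXAPP_ENABLE_RATE_LIMITING: True
    EDXAPP_COURSES_REQUEST_RATE: "5r/s"     # placeholder rate for the cookie-keyed zone
    EDXAPP_RATE_LIMITED_USER_AGENTS:
      - alias: "generic_bot"                # becomes the $limit_bot_generic_bot map
        agent_name: "~*(crawler|spider)"    # placeholder user-agent match
        rate: "1r/s"                        # placeholder per-agent rate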
+ +upstream lms-backend { + {% for host in nginx_lms_gunicorn_hosts %} + server {{ host }}:{{ edxapp_lms_gunicorn_port }} fail_timeout=0; + {% endfor %} + +} + +{%- if EDXAPP_ENABLE_RATE_LIMITING -%} +# Make Zone +limit_req_zone $cookie_{{ EDXAPP_SESSION_COOKIE_NAME }} zone=cookies:10m rate={{ EDXAPP_COURSES_REQUEST_RATE }}; + +{% for agent in EDXAPP_RATE_LIMITED_USER_AGENTS %} + +# Map of http user agent with name limit_bot_agent_alias having binary IP of the agent +map $http_user_agent {{ "$limit_bot_" ~ agent.alias }} { + {{ agent.agent_name }} $binary_remote_addr; + } + +limit_req_zone {{ "$limit_bot_" ~ agent.alias }} zone=agents:10m rate={{ agent.rate }}; +{% endfor %} + +{%- endif %} + + +{% if NGINX_EDXAPP_EMBARGO_CIDRS %} + {%- if NGINX_SET_X_FORWARDED_HEADERS %} +geo $remote_addr $embargo { + {%- else %} +geo $http_x_forwarded_for $embargo { + {% endif -%} + default 0; + + {% for cidr in NGINX_EDXAPP_EMBARGO_CIDRS -%} + {{ cidr }} 1; + {% endfor %} + +} +{%- endif %} + + +{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + # The Origin request header indicates where a fetch originates from. It doesn't include any path information, + # but only the server name (e.g. https://www.example.com). + # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details. + # + # Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one + # of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS. + map $http_origin $cors_origin { + default "null"; + {% for host in EDXAPP_CORS_ORIGIN_WHITELIST %} + "~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin; + {% endfor %} + } +{% endif %} + + +server { + # LMS configuration file for nginx, templated by ansible + + {% if NGINX_EDXAPP_ENABLE_S3_MAINTENANCE %} + + # Do not include a 502 error in NGINX_ERROR_PAGES when + # NGINX_EDXAPP_ENABLE_S3_MAINTENANCE is enabled. 
+ + # Return a 503 instead so that it passes through Cloudflare + error_page 502 =503 @maintenance; + + {% include "s3_maintenance.j2" %} + + {% endif %} + + # error pages + {% for k, v in NGINX_EDXAPP_ERROR_PAGES.items() %} +error_page {{ k }} {{ v }}; + {% endfor %} + + {% if NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + +{% include "empty_json.j2" %} + + listen {{ EDXAPP_LMS_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ EDXAPP_LMS_NGINX_PORT }} {{ default_site }}; + {% endif %} + + {% if NGINX_ENABLE_SSL %} + listen {{ EDXAPP_LMS_SSL_NGINX_PORT }} {{ default_site }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ EDXAPP_LMS_SSL_NGINX_PORT }} {{ default_site }} ssl; + {% endif %} + + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + {% endif %} + + {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains"; + {% endif %} + + # prevent the browser from doing MIME-type sniffing + add_header X-Content-Type-Options nosniff; + + # Prevent invalid display of courseware in IE 10+ with high privacy settings + add_header P3P '{{ NGINX_P3P_MESSAGE }}'; + + {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%} + # To track requests + add_header X-Request-ID $request_tracking_id; + {% endif %} + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + + access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }}; + error_log {{ nginx_log_dir }}/error.log error; + + # Some Master's courses require submissions up to 20MB in size. + # CMS requires a larger value for course assets; values are provided + # via hiera. + client_max_body_size {{ NGINX_LMS_CLIENT_MAX_BODY_SIZE }}; + proxy_read_timeout {{ NGINX_LMS_PROXY_READ_TIMEOUT }}; + + rewrite ^(.*)/favicon.ico$ {{ NGINX_EDXAPP_FAVICON_PATH }} last; + + {% include "python_lib.zip.j2" %} + {% include "common-settings.j2" %} + + {% if NGINX_EDXAPP_EMBARGO_CIDRS -%} + # only redirect to embargo when $embargo == true and $uri != $embargo_url + # this is a hack to do multiple conditionals + set $embargo_url "/embargo/blocked-message/courseware/embargo/"; + if ( $embargo ) { + set $do_embargo "A"; + } + if ( $uri != $embargo_url ) { + set $do_embargo "${do_embargo}B"; + } + if ( $do_embargo = "AB" ) { + return 302 $embargo_url; + } + {% endif -%} + + location @proxy_to_lms_app { +{% include "lms_proxy.j2" %} + + } + + location @proxy_to_lms_app_api { + error_page 504 @empty_json; + error_page 502 @empty_json; + error_page 500 @empty_json; + +{% include "lms_proxy.j2" %} + } + + location / { + {% if EDXAPP_LMS_ENABLE_BASIC_AUTH|bool %} + {% include "basic-auth.j2" %} + {% endif %} + + try_files $uri @proxy_to_lms_app; + } + + # /login?next=<any-url> can be used by 3rd party sites in <img> tags to + # determine whether a user on their site is logged into edX. + # The most common image to use is favicon.ico.
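+ # Illustration (hypothetical embedding site): an <img> pointed at
+ # https://lms.example.com/login?next=/favicon.ico would load only for users
+ # with an active session, leaking login state to the embedding page. The
+ # block below answers such probes with a cheap, unlogged 403 instead of
+ # proxying them to the LMS.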
+ location /login { + if ( $arg_next ~* "favicon.ico" ) { + access_log off; + return 403; + } + + try_files $uri @proxy_to_lms_app; + } + +{% if NGINX_EDXAPP_EMBARGO_CIDRS %} + location $embargo_url { + try_files $uri @proxy_to_lms_app; + } +{% endif %} + + # No basic auth for /segmentio/event + location /segmentio/event { + try_files $uri @proxy_to_lms_app; + } + + # The api is accessed using OAUTH2 which + # uses the authorization header so we can't have + # basic auth on it as well. + location /api { + try_files $uri @proxy_to_lms_app_api; + } + + location /login_refresh { + try_files $uri @proxy_to_lms_app_api; + } + + # CSRF token API + location /csrf/api { + try_files $uri @proxy_to_lms_app_api; + } + + #enterprise API + location /enterprise/api { + try_files $uri @proxy_to_lms_app_api; + } + + # Consent API + location /consent/api { + try_files $uri @proxy_to_lms_app_api; + } + + # Need a separate location for the image uploads endpoint to limit upload sizes + location ~ ^/api/profile_images/[^/]*/[^/]*/upload$ { + try_files $uri @proxy_to_lms_app_api; + client_max_body_size {{ EDXAPP_PROFILE_IMAGE_MAX_BYTES + 1000 }}; + } + + location /user_api { + try_files $uri @proxy_to_lms_app_api; + } + + # No basic auth security on the github_service_hook url, so that github can use it for cms + location /github_service_hook { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth security on oauth2 endpoint + location /oauth2 { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth security on third party auth endpoints + location /auth { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth on the XBlock View endpoint, which can use OAuth2 + location ~ ^/courses/.*/xblock/.*/view/ { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth on XBlock handlers, which can use OAuth2 + location ~ ^/courses/.*/xblock/.*/handler/ { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth security on assets + location /c4x { + try_files $uri @proxy_to_lms_app; + } + + location /asset { + {% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + add_header Access-Control-Allow-Origin $cors_origin; + {% endif %} + try_files $uri @proxy_to_lms_app; + } + + # No basic auth security on the heartbeat url, so that ELB can use it + location /heartbeat { + # If /edx/var/nginx/server-static/maintenance_heartbeat.txt exists serve an + # empty 200 so the instance stays in the load balancer to serve the + # maintenance page + if (-f /edx/var/nginx/server-static/maintenance_heartbeat.txt) { + return 200; + } + try_files $uri @proxy_to_lms_app; + } + + # No basic auth on the LTI provider endpoint, it does OAuth1 + location /lti_provider { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth on LTI component grade. 
+ location ~ /handler_noauth { + try_files $uri @proxy_to_lms_app; + } + + location /courses { + {%- if EDXAPP_ENABLE_RATE_LIMITING -%} + # Set Limit + limit_req zone=cookies burst={{ EDXAPP_COURSES_REQUEST_BURST_RATE }}; + + {%- if EDXAPP_RATE_LIMITED_USER_AGENTS|length > 0 %} + limit_req zone=agents burst={{ EDXAPP_COURSES_USER_AGENT_BURST_RATE }}; + {%- endif %} + error_page 503 = /server/rate-limit.html; + {%- endif -%} + + {% if EDXAPP_LMS_ENABLE_BASIC_AUTH|bool %} + {%- include "basic-auth.j2" %} + {% endif %} + try_files $uri @proxy_to_lms_app; + } + + # No basic auth, uses OAuth2 for authentication + location /v1/accounts/gdpr_retire_users { + try_files $uri @proxy_to_lms_app; + } + +location ~ ^{{ EDXAPP_MEDIA_URL }}/(?P<file>.*) { + root {{ edxapp_media_dir }}; + try_files /$file =404; + expires {{ EDXAPP_PROFILE_IMAGE_MAX_AGE }}s; +} + +{% if NGINX_ADMIN_ACCESS_CIDRS and EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION %} + location /admin { + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} + allow {{ cidr }}; + {% endfor %} + deny all; + try_files $uri @proxy_to_lms_app; + } +{% endif %} + + {% include "robots.j2" %} + {% include "static-files.j2" %} + {% include "extra_locations_lms.j2" ignore missing %} + +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms_proxy.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms_proxy.j2 new file mode 100644 index 00000000000..4c6c75676f8 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms_proxy.j2 @@ -0,0 +1,31 @@ +{% if NGINX_SET_X_FORWARDED_HEADERS %} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; + {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%} + proxy_set_header X-Request-ID $request_tracking_id; + {% endif %} +{% else %} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%} + proxy_set_header X-Request-ID $request_tracking_id; + {% endif %} +{% endif %} + + # newrelic-specific header records the time when nginx handles a request.
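+ # Illustration with an assumed arrival time: at Unix time 1700000000.123,
+ # $msec expands so the upstream sees "X-Queue-Start: t=1700000000.123";
+ # New Relic subtracts this from the app's start-of-work time to chart
+ # request queueing.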
+ proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://lms-backend; + +{% if EDXAPP_SET_PROXY_BUFFER_SIZE %} + proxy_buffer_size {{EDXAPP_PROXY_BUFFER_SIZE}}; + proxy_buffers {{EDXAPP_PROXY_BUFFERS_NUMBER}} {{EDXAPP_PROXY_BUFFERS_SIZE}}; + proxy_busy_buffers_size {{EDXAPP_PROXY_BUSY_BUFFERS_SIZE}}; +{% endif %} + + {{ NGINX_EDXAPP_LMS_APP_EXTRA }} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 new file mode 100644 index 00000000000..23c371bed02 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 @@ -0,0 +1,16 @@ +# nginx maps are defined at the top level and are global +# +# THESE ARE GLOBAL TO ALL IDAs, USE CAUTION WHEN DEFINING HERE +# SEE https://github.com/openedx/configuration/pull/5056 FOR A +# CAUTIONARY TALE + +# cache header for static files +map $status $cache_header_long_lived { + default "max-age=315360000"; + 404 "no-cache"; +} + +map $status $cache_header_short_lived { + default "max-age=300"; + 404 "no-cache"; +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/nginx_redirect.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/nginx_redirect.j2 new file mode 100644 index 00000000000..b01fbc123d0 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/nginx_redirect.j2 @@ -0,0 +1,28 @@ +{%- if "default" in item.value -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + listen {{ REDIRECT_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ REDIRECT_NGINX_PORT }} {{ default_site }}; + {% endif %} + + {% if "ssl" in item.value and item.value['ssl'] == true -%} + listen {{ REDIRECT_SSL_NGINX_PORT }} {{ default_site }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ REDIRECT_SSL_NGINX_PORT }} {{ default_site }} ssl; + {% endif %} + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + {% endif -%} + + server_name {% for server in item.value['server_names'] %} + + {{ server }}{% endfor -%}; + + + return 301 {{ item.value['redirect_destination'] }}$request_uri; +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/program_console.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/program_console.j2 new file mode 100644 index 00000000000..775ecedf8a4 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/program_console.j2 @@ -0,0 +1,35 @@ +{%- if "program_console" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + listen {{ PROGRAM_CONSOLE_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ PROGRAM_CONSOLE_NGINX_PORT }} {{ default_site }}; + {% endif %} + server_name ~^((stage|prod)-)?program-console.*; + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + + location / { + root /edx/app/program-console/program-console/dist; + index index.html; + } +} + +server { + listen {{ PROGRAM_CONSOLE_SSL_NGINX_PORT }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ PROGRAM_CONSOLE_SSL_NGINX_PORT }} ssl; + {% endif %} + server_name ~^((stage|prod)-)?program-console.*; + ssl_certificate 
/etc/ssl/certs/wildcard.sandbox.edx.org.pem; + ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + location / { + root /edx/app/program-console/program-console/dist; + index index.html; + } +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/prospectus.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/prospectus.j2 new file mode 100644 index 00000000000..5f64e56dab1 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/prospectus.j2 @@ -0,0 +1,285 @@ +{%- if "prospectus" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + + +{% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED and PROSPECTUS_S3_HOSTING_PROXY_CACHE_ENABLED %} +proxy_cache_path {{ nginx_server_cache_dir }} levels=1:2 keys_zone=STATIC:10m max_size=10g use_temp_path=off; +{% endif %} + +server { + # Prospectus configuration file for nginx, templated by ansible + + server_name ~^((stage|prod)-)?prospectus.*; + + add_header X-Frame-Options DENY; + +{% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + resolver 127.0.0.53; +{% if PROSPECTUS_S3_HOSTING_PROXY_CACHE_ENABLED %} + proxy_cache STATIC; + proxy_cache_valid any 1m; + add_header X-nginx-cache-status $upstream_cache_status; +{% endif %} +{% endif %} + + {% if NGINX_PROSPECTUS_DISABLE_INDEXING %} + + add_header X-Robots-Tag "noindex, nofollow" always; + + {% endif %} + + {% if NGINX_ENABLE_SSL %} + + listen {{ prospectus_ssl_nginx_port }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ prospectus_ssl_nginx_port }} ssl; + {% endif %} + + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + {% endif %} + + {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains"; + {% endif %} + + + {% if NGINX_PROSPECTUS_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + + listen {{ PROSPECTUS_NGINX_PORT }} {{ default_site }}; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ PROSPECTUS_NGINX_PORT }} {{ default_site }}; + {% endif %} + + root {{ PROSPECTUS_DATA_DIR }}; + + # Never cache 404 responses at the CDN, for any page.
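+ # Illustration with the prefix value used in comments below ("924c142-1"):
+ # a miss such as GET /no-such-page is served from
+ # <bucket>/924c142-1/404.html with "Cache-Control: no-store, max-age=0",
+ # so the CDN cannot pin a stale 404 onto a URL that is published later.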
+ error_page 404 /404.html; + location = /404.html { + root {{ PROSPECTUS_DATA_DIR }}; + add_header Cache-Control "no-store, max-age=0" always; + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}/404.html; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + {% endif %} + } + + location /event { + # this page is designed to be injected into other pages via an iframe + add_header X-Frame-Options ''; + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}$request_uri; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + {% endif %} + } + + # Look for hashed .css, .js and .map files in bucket/static_hashed to prevent 404 when cloudflare cache is cleared + # The regex only matches the root path, which is where the only .css, .js and .map files with content-hash filenames live + location ~* ^/[^/]+\.(css|js|map)$ { + # Cache js/css for a long time at the edge, they are versioned in their names + add_header 'Cache-Control' 'public, max-age=31536000, immutable'; + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/static_hashed$request_uri; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + {% endif %} + } + + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + # Look for /page-data/sq/d/*.js files in bucket/static_hashed to prevent 404 when cloudflare cache is cleared + location /page-data/sq/d/ { + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/static_hashed$request_uri; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + } + + # Look for /static/* files in bucket/static_hashed to prevent 404 when cloudflare cache is cleared + location /static/ { + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/static_hashed$request_uri; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + } + {% endif %} + + # Images sometimes change; cache them for an hour at the edge to reduce bandwidth. + + location /images/ { + add_header 'Cache-Control' 'public, max-age=3600'; + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}$request_uri; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + {% endif %} + } + + # The favicon is requested a lot; cache it at the edge. + + location /favicon.ico { + add_header 'Cache-Control' 'public, max-age=86400'; + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}$request_uri; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + {% endif %} + } + + + # Ignore the rollout group headers for the health check endpoint.
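+ # Illustration: a load balancer probe of GET /HealthCheck is always answered
+ # from <bucket>/<prefix>/HealthCheck/index.html, whatever rollout-group
+ # headers the probe carries, so health status stays stable mid-rollout.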
+ location /HealthCheck { + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}/HealthCheck/index.html; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + # proxy_redirect ensures redirects from s3 are rewritten + # For example it will fix a redirect from s3 to prevent /school/mitx from trying to redirect to /924c142-1/school/mitx/ + # The second parameter being " " is to prevent nginx sticking http://hostname in front of the location directive + proxy_redirect "/{{ PROSPECTUS_S3_HOSTING_PREFIX }}" " "; + {% else %} + try_files $uri $uri/index.html; + {% endif %} + } + location /es/bio/ { + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + rewrite ^ /{{ PROSPECTUS_S3_HOSTING_PREFIX }}/es/bio/index.html break; + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}/es/bio/index.html; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + # proxy_redirect ensures redirects from s3 are rewritten + # For example it will fix a redirect from s3 to prevent /school/mitx from trying to redirect to /924c142-1/school/mitx/ + # The second parameter being " " is to prevent nginx sticking http://hostname in front of the location directive + proxy_redirect "/{{ PROSPECTUS_S3_HOSTING_PREFIX }}" " "; + {% else %} + try_files $uri $uri/ /es/bio/index.html; + {% endif %} + } + + location /bio/ { + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + rewrite ^ /{{ PROSPECTUS_S3_HOSTING_PREFIX }}/bio/index.html break; + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}/bio/index.html; + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + # proxy_redirect ensures redirects from s3 are rewritten + # For example it will fix a redirect from s3 to prevent /school/mitx from trying to redirect to /924c142-1/school/mitx/ + # The second parameter being " " is to prevent nginx sticking http://hostname in front of the location directive + proxy_redirect "/{{ PROSPECTUS_S3_HOSTING_PREFIX }}" " "; + {% else %} + try_files $uri $uri/ /bio/index.html; + {% endif %} + } + + # preview pages are not cached, and have basic auth + + location /secure-preview/ { + {% if PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH|bool %} + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + {% endif %} + port_in_redirect off; + add_header Cache-Control "no-store, max-age=0"; + } + + location /es/secure-preview/ { + {% if PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH|bool %} + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + {% endif %} + port_in_redirect off; + add_header Cache-Control "no-store, max-age=0"; + } + + location /preview/ { + port_in_redirect off; + add_header Cache-Control "no-store, max-age=0"; + } + + location /es/preview/ { + port_in_redirect off; + add_header Cache-Control "no-store, max-age=0"; + } + + # PROSPECTUS_TEMPORARY_REDIRECTS will be a list of dictionaries which have: + # - from_path: The path of the course-about page that you want redirect + # - to_path: URL to redirect to + {% for redirect in PROSPECTUS_TEMPORARY_REDIRECTS %} + location = {{ redirect.from_path }} { + include fastcgi_params; + fastcgi_param SERVER_PORT 80; + port_in_redirect off; + return 302 {{ redirect.to_path }}; + } + {% endfor %} + + + + {% if prospectus_redirect_file is defined %} + include fastcgi_params; + 
fastcgi_param SERVER_PORT 80; + include {{ prospectus_redirect_file }}; + port_in_redirect off; + {% endif %} + + add_header 'Cache-Control' 'public, max-age=1800'; + try_files $uri $uri/index.html =404; + + {% if PROSPECTUS_S3_HOSTING_PROXY_ENABLED %} + # Redirect URLs with trailing slashes to non-trailing slash equivalents + # Gatsby is set up to have the canonical URLs be the versions without trailing slashes + location ~ ^.+/$ { + # absolute_redirect off prevents adding http://localhost to the redirect + # We want a relative redirect, i.e. /learn/ -> /924c142-1/learn, not to http://localhost/924c142-1/learn + absolute_redirect off; + rewrite ^/(.*)/$ /$1 permanent; + } + + location / { + # absolute_redirect off prevents adding http://localhost to the redirect + # We want a relative redirect, i.e. /learn/ -> /924c142-1/learn, not to http://localhost/924c142-1/learn + absolute_redirect off; + # Adds trailing slash that S3 requires in order to serve index.html from a folder + # For instance in order to serve edx.org/learn we need to ask s3 for {{ PROSPECTUS_S3_HOSTING_PREFIX }}/learn/ + rewrite ^/((.*/)*[^.]*[^/])$ /{{ PROSPECTUS_S3_HOSTING_PREFIX }}/$1/ break; + + proxy_pass {{ PROSPECTUS_S3_HOSTING_BUCKET_URL }}/{{ PROSPECTUS_S3_HOSTING_PREFIX }}/; + + # Hide client headers from S3 to prevent request headers too big error + proxy_pass_request_headers off; + + # proxy_redirect ensures redirects from s3 are rewritten + # For example it will fix a redirect from s3 to prevent /school/mitx from trying to redirect to /924c142-1/school/mitx/ + # The second parameter being " " is to prevent nginx sticking http://hostname in front of the location directive + proxy_redirect "/{{ PROSPECTUS_S3_HOSTING_PREFIX }}" " "; + } + {% endif %} + + # PROSPECTUS_STATIC_SITES will be a list of dictionaries which have a: + # - router_path: The path you will go to on the router to access the content + # - proxied_path: The path to proxy the requests to + {% for static_site in PROSPECTUS_STATIC_SITES %} + + # Matches: /<router_path>/ + location = /{{ static_site.router_path }}/ { + proxy_pass {{ static_site.proxied_path }}/index.html; + } + + # Matches: /<router_path>/<dirs.../><file>.<ext> + location ~ ^/{{ static_site.router_path }}/((?:\w+\/+)*)([\w\-\.]+\.[\w\-\.]+) { + proxy_pass {{ static_site.proxied_path }}/$1$2; + } + + # Matches: /<router_path>/<page>[/] + location ~ ^/{{ static_site.router_path }}/([a-z0-9-]+)[/]? { + proxy_pass {{ static_site.proxied_path }}/$1/index.html; + } + + {% endfor -%} +} + diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/python_lib.zip.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/python_lib.zip.j2 new file mode 100644 index 00000000000..1d0da2f8e08 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/python_lib.zip.j2 @@ -0,0 +1,9 @@ + + # Blackholes an archive of python library files that instructors + # may provide for sandboxed python problem types; the internal + # directive will result in nginx emitting its own 404. Users + # will not be redirected to the application 404 page.
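+ # Illustration: a request for GET /static/python_lib.zip matches below, and
+ # "internal" makes nginx answer with its own bare 404 rather than serving
+ # the archive or falling through to the application's themed 404 page.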
+ location ~* python_lib.zip { + internal; + } + diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/robots.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/robots.j2 new file mode 100644 index 00000000000..6352497319e --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/robots.j2 @@ -0,0 +1,6 @@ +{% if NGINX_ROBOT_RULES|length > 0 %} + location /robots.txt { + root {{ nginx_app_dir }}; + try_files $uri /robots.txt =404; + } +{% endif %} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/s3_maintenance.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/s3_maintenance.j2 new file mode 100644 index 00000000000..e9cb9cc6506 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/s3_maintenance.j2 @@ -0,0 +1,15 @@ +{% if NGINX_EDXAPP_ENABLE_S3_MAINTENANCE %} +location @maintenance { + rewrite ^(.*) {{ NGINX_EDXAPP_S3_MAINTENANCE_FILE }} break; + proxy_http_version 1.1; + proxy_set_header Host s3.amazonaws.com; + proxy_set_header Authorization ''; + proxy_hide_header x-amz-id-2; + proxy_hide_header x-amz-request-id; + proxy_hide_header Set-Cookie; + proxy_ignore_headers "Set-Cookie"; + proxy_buffering off; + proxy_intercept_errors on; + proxy_pass https://s3.amazonaws.com; +} +{% endif %} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files-extra.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files-extra.j2 new file mode 100644 index 00000000000..769a6713a4f --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files-extra.j2 @@ -0,0 +1,13 @@ +{% if EDXAPP_SCORM_PLAYER_LOCAL_STORAGE_ROOT %} + # w/in scorm/, override default return 403 for these file types + location ~ ^/static/scorm/(?:.*)(?:\.xml|\.json) { + try_files /{{ EDXAPP_SCORM_PLAYER_LOCAL_STORAGE_ROOT }}/$file =404; + } + + location ~ "/scorm/(?P<file>.*)" { + add_header 'Access-Control-Allow-Origin' $cors_origin; + add_header 'Access-Control-Allow-Credentials' 'true'; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + try_files /{{ EDXAPP_SCORM_PLAYER_LOCAL_STORAGE_ROOT }}/$file =404; + } +{% endif %} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 new file mode 100644 index 00000000000..f2cc1966324 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 @@ -0,0 +1,57 @@ + # static pages for server status + location ~ ^/server/(?P<file>.*) { + root /edx/var/nginx/server-static; + try_files /$file =404; + } + + location ~ ^/static/(?P<file>.*) { + root {{ edxapp_data_dir }}; + try_files /staticfiles/$file /course_static/$file =404; + +{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + add_header Access-Control-Allow-Origin $cors_origin; +{% endif %} + + # return a 403 for static files that shouldn't be + # in the staticfiles directory + location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) { + return 403; + } + + # http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default + location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff|woff2)$)" { + add_header "Cache-Control" $cache_header_long_lived always; + + # Prevent the browser from doing MIME-type sniffing + add_header X-Content-Type-Options nosniff; + +{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + add_header Access-Control-Allow-Origin $cors_origin; +{% endif %}
+ try_files /staticfiles/$collected /course_static/$collected =404; + } + + # Set django-pipelined files to maximum cache time + location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" { + add_header "Cache-Control" $cache_header_long_lived always; + # Without this try_files, files that have been run through + # django-pipeline return 404s + try_files /staticfiles/$collected /course_static/$collected =404; + } + + # Set django-pipelined files for studio to maximum cache time + location ~ "/static/(?P<collected>[0-9a-f]{7}/.*)" { + add_header "Cache-Control" $cache_header_long_lived always; + + # Without this try_files, files that have been run through + # django-pipeline return 404s + try_files /staticfiles/$collected /course_static/$collected =404; + } + + {% include "static-files-extra.j2" ignore missing %} + + # Non-hashed files (there should be very few / none of these) + add_header "Cache-Control" $cache_header_short_lived always; + } + + diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/subscriptions.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/subscriptions.j2 new file mode 100644 index 00000000000..35841adbe51 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/subscriptions.j2 @@ -0,0 +1,30 @@ +server { + server_name ~^((stage|prod)-)?subscriptions.*; + listen 80; + rewrite ^ https://$host$request_uri? permanent; +} +server { + server_name ~^((stage|prod)-)?subscriptions.*; + listen 443 ssl; + ssl_certificate /etc/ssl/certs/wildcard.sandbox.edx.org.pem; + ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key; + + location / { + try_files $uri @proxy_to_app; + } + location ~ ^/(api)/ { + try_files $uri @proxy_to_app; + } + location @proxy_to_app { + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Host $http_host; + proxy_redirect off; + proxy_pass http://127.0.0.1:18750; + } + location ~ ^/static/(?P<file>.*) { + root /edx/var/subscriptions; + try_files /staticfiles/$file =404; + } +} \ No newline at end of file diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 new file mode 100644 index 00000000000..28d96272dd4 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 @@ -0,0 +1,66 @@ +upstream xqueue_app_server { + {% for host in nginx_xqueue_gunicorn_hosts %} + server {{ host }}:{{ xqueue_gunicorn_port }} fail_timeout=0; + {% endfor %} +} + +server { + listen {{ XQUEUE_NGINX_PORT }} default_server; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ XQUEUE_NGINX_PORT }} default_server; + {% endif %} + + {% if NGINX_ENABLE_SSL %} + listen {{ XQUEUE_NGINX_SSL_PORT }} ssl; + {% if NGINX_ENABLE_IPV6 %} + listen [::]:{{ XQUEUE_NGINX_SSL_PORT }} ssl; + {% endif %} + + {% include "common-settings.j2" %} + + access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }}; + error_log {{ nginx_log_dir }}/error.log error; + + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + {% endif %} + + {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + {% endif %} + + # set xqueue upload limit to 20MB to match the LMS upload limit.
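+ # Illustration: a grader submission with, say, a 25M body is rejected here
+ # with "413 Request Entity Too Large" before it ever reaches the xqueue app.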
+ client_max_body_size 20M; + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + + location / { + {% if XQUEUE_ENABLE_BASIC_AUTH|bool %} + {% include "basic-auth.j2" %} + {% endif %} + try_files $uri @proxy_to_app; + } + + # No basic auth security on the heartbeat url, so that ELB can use it + location /xqueue/status/ { + try_files $uri @proxy_to_app; + } + + {% include "robots.j2" %} + +location @proxy_to_app { + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + + # newrelic-specific header records the time when nginx handles a request. + proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://xqueue_app_server; + } +} + diff --git a/playbooks/roles/nginx/templates/edx/var/nginx/server-static/server-template.j2 b/playbooks/roles/nginx/templates/edx/var/nginx/server-static/server-template.j2 new file mode 100644 index 00000000000..5ca28cf9742 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/var/nginx/server-static/server-template.j2 @@ -0,0 +1,23 @@ +<!DOCTYPE html> +<html> +<head> + <meta charset="utf-8"> + <title>{{ item.title }}</title> +</head> +<body>
+<div>
+    <h1>{{ item.heading }}</h1>
+    <img alt="{{ item.img_alt }}">
+    <h2>{{ item.title }}</h2>
+    <p>{{ item.msg }}</p>
+</div>
+</body> +</html> + diff --git a/playbooks/roles/nginx/templates/edx_logrotate_nginx_access.j2 b/playbooks/roles/nginx/templates/etc/logrotate.d/edx_logrotate_nginx_access.j2 similarity index 100% rename from playbooks/roles/nginx/templates/edx_logrotate_nginx_access.j2 rename to playbooks/roles/nginx/templates/etc/logrotate.d/edx_logrotate_nginx_access.j2 diff --git a/playbooks/roles/nginx/templates/edx_logrotate_nginx_error.j2 b/playbooks/roles/nginx/templates/etc/logrotate.d/edx_logrotate_nginx_error.j2 similarity index 100% rename from playbooks/roles/nginx/templates/edx_logrotate_nginx_error.j2 rename to playbooks/roles/nginx/templates/etc/logrotate.d/edx_logrotate_nginx_error.j2 diff --git a/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 b/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 new file mode 100644 index 00000000000..1c3494e5434 --- /dev/null +++ b/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 @@ -0,0 +1,148 @@ +user www-data; +worker_processes 4; +pid /var/run/nginx.pid; + +events { + worker_connections 768; + # multi_accept on; +} + +http { + + ## + # Basic Settings + ## + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + {% if NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE %} + map_hash_max_size {{ NGINX_MAP_HASH_MAX_SIZE }}; + map_hash_bucket_size {{ NGINX_MAP_HASH_BUCKET_SIZE }}; + {% endif %} + # increase header buffer for https://edx-wiki.atlassian.net/browse/LMS-467 + # see http://orensol.com/2009/01/18/nginx-and-weird-400-bad-request-responses/ + large_client_header_buffers 8 16k; + server_tokens off; + + {% if NGINX_OVERRIDE_DEFAULT_SERVER_NAMES_HASH_SIZE %} + server_names_hash_bucket_size {{ NGINX_SERVER_NAMES_HASH_BUCKET_SIZE }}; + {% endif %} + # server_name_in_redirect off; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%} + # Setting variables based on HTTP headers to track and differentiate requests across components: the first map creates a + # variable called trace_id based on the X-REQUEST-ID or Cloudflare CF-ray headers, while the second creates a variable named + # request_tracking_id based on whether trace_id is set or not.
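+ # Illustration with assumed header values: a request carrying
+ # "X-Request-ID: abc123" is tracked as abc123; one carrying only Cloudflare's
+ # CF-Ray header is tracked by its ray id; a request with neither falls
+ # through to nginx's generated $request_id.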
+ map $http_x_request_id $trace_id { + "" "${http_cf_ray}"; + default "${http_x_request_id}"; + } + map $trace_id $request_tracking_id { + "" "${request_id}"; + default $trace_id; + } + {% endif %} + + ## + # Logging Settings + ## + + log_format p_combined '$http_x_forwarded_for - $remote_addr - $remote_user $http_x_forwarded_proto [$time_local] ' + '"$request" $status $body_bytes_sent $request_time ' + '"$http_referer" "$http_user_agent"'; + + log_format ssl_combined '$remote_addr - $ssl_client_s_dn - "$upstream_addr" [$time_local] ' + '"$request" $status $body_bytes_sent $request_time ' + '"$http_referer" "$http_user_agent"'; + + log_format app_metrics 'time=$time_iso8601 client=$remote_addr method=$request_method request="$request" ' + 'request_length=$request_length status=$status bytes_sent=$bytes_sent body_bytes_sent=$body_bytes_sent ' + 'referer=$http_referer user_agent="$http_user_agent" upstream_addr=$upstream_addr upstream_status=$upstream_status ' + 'request_time=$request_time request_id=$request_id upstream_response_time=$upstream_response_time ' + 'upstream_connect_time=$upstream_connect_time upstream_header_time=$upstream_header_time'; + + {% if NGINX_ENABLE_REQUEST_TRACKING_ID -%} + log_format cf_custom '$http_x_forwarded_for - $remote_addr - $remote_user $http_x_forwarded_proto [$time_local] $request_tracking_id ' + '"$request" $status $body_bytes_sent $request_time ' + '"$http_referer" "$http_user_agent"'; + {% endif %} + + log_format json_analytics escape=json '{' + '"msec": "$msec", ' # request unixtime in seconds with a milliseconds resolution + '"connection": "$connection", ' # connection serial number + '"connection_requests": "$connection_requests", ' # number of requests made in connection + '"pid": "$pid", ' # process pid + '"request_id": "$request_id", ' # the unique request id + '"request_length": "$request_length", ' # request length (including headers and body) + '"remote_addr": "$remote_addr", ' # client IP + '"remote_user": "$remote_user", ' # client HTTP username + '"remote_port": "$remote_port", ' # client port + '"time_local": "$time_local", ' + '"time_iso8601": "$time_iso8601", ' # local time in the ISO 8601 standard format + '"request": "$request", ' # full path no arguments of the request + '"request_uri": "$request_uri", ' # full path and arguments of the request + '"args": "$args", ' # args + '"status": "$status", ' # response status code + '"body_bytes_sent": "$body_bytes_sent", ' # the number of body bytes exclude headers sent to a client + '"bytes_sent": "$bytes_sent", ' # the number of bytes sent to a client + '"http_referer": "$http_referer", ' # HTTP referer + '"http_user_agent": "$http_user_agent", ' # user agent + '"http_x_forwarded_for": "$http_x_forwarded_for", ' # http_x_forwarded_for + '"http_host": "$http_host", ' # the request Host: header + '"server_name": "$server_name", ' # the name of the vhost serving the request + '"request_time": "$request_time", ' # request processing time in seconds with msec resolution + '"upstream": "$upstream_addr", ' # upstream backend server for proxied requests + '"upstream_connect_time": "$upstream_connect_time", ' # upstream handshake time incl. 
TLS + '"upstream_header_time": "$upstream_header_time", ' # time spent receiving upstream headers + '"upstream_response_time": "$upstream_response_time", ' # time spent receiving upstream body + '"upstream_response_length": "$upstream_response_length", ' # upstream response length + '"upstream_cache_status": "$upstream_cache_status", ' # cache HIT/MISS where applicable + '"ssl_protocol": "$ssl_protocol", ' # TLS protocol + '"ssl_cipher": "$ssl_cipher", ' # TLS cipher + '"scheme": "$scheme", ' # http or https + '"request_method": "$request_method", ' # request method + '"server_protocol": "$server_protocol", ' # request protocol, like HTTP/1.1 or HTTP/2.0 + '"pipe": "$pipe", ' # "p" if request was pipelined, "." otherwise + '"gzip_ratio": "$gzip_ratio"' + '}'; + + access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }}; + error_log {{ nginx_log_dir }}/error.log; + + ## + # SSL/TLS settings + ## + + ssl_protocols {{ NGINX_SSL_PROTOCOLS }}; + ssl_ciphers {{ NGINX_SSL_CIPHERS }}; + ssl_prefer_server_ciphers on; + ssl_dhparam {{ NGINX_DH_PARAMS_PATH }}; + + + ## + # Gzip Settings + ## + + gzip on; + gzip_disable "msie6"; + + gzip_vary on; + gzip_proxied any; + gzip_comp_level 6; + gzip_buffers 16 8k; + gzip_http_version 1.1; + gzip_types text/plain text/css text/xml application/xml application/xml+rss image/svg+xml application/json application/javascript application/x-javascript text/javascript; + + ## + # Virtual Host Configs + ## + + include {{ nginx_conf_dir }}/*.conf; + include {{ nginx_sites_enabled_dir }}/*; +} diff --git a/playbooks/roles/nginx/templates/forum.j2 b/playbooks/roles/nginx/templates/forum.j2 deleted file mode 100644 index 8a7cc75b090..00000000000 --- a/playbooks/roles/nginx/templates/forum.j2 +++ /dev/null @@ -1,52 +0,0 @@ -# -# {{ ansible_managed }} -# -{# This prevents the injected comment from eating the server - directive. There's probably a better way of doing this, - but I don't know it currently. -#} -{% raw %} - -{% endraw %} - -{%- if "forum" in nginx_default_sites -%} - {%- set default_site = "default" -%} -{%- else -%} - {%- set default_site = "" -%} -{%- endif -%} - -{% if devstack %} -{# Connects to webbrick on port 4567 typically. Appropriate for development deployments #} - -upstream forum_app_server { - server localhost:{{ forum_unicorn_port }} fail_timeout=0; -} -{% else %} -{# Connects to unicorn over a unix socket. 
Appropriate for production deployments #} - -upstream forum_app_server { - server unix:{{ forum_data_dir }}/forum.sock fail_timeout=0; -} -{% endif %} - -server { - - server_name forum.*; - listen {{ FORUM_NGINX_PORT }} {{default_site}}; - client_max_body_size 1M; - keepalive_timeout 5; - - location / { - try_files $uri @proxy_to_app; - } - -location @proxy_to_app { - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $http_host; - - proxy_redirect off; - proxy_pass http://forum_app_server; - } -} \ No newline at end of file diff --git a/playbooks/roles/nginx/templates/lms-preview.j2 b/playbooks/roles/nginx/templates/lms-preview.j2 deleted file mode 100644 index 1dfa6d03c3d..00000000000 --- a/playbooks/roles/nginx/templates/lms-preview.j2 +++ /dev/null @@ -1,78 +0,0 @@ -upstream lms-preview-backend { - {% for host in nginx_lms_preview_gunicorn_hosts %} - server {{ host }}:{{ edxapp_lms_preview_gunicorn_port }} fail_timeout=0; - {% endfor %} -} - -server { - # LMS-preview configuration file for nginx, templated by ansible - - listen {{EDXAPP_LMS_PREVIEW_NGINX_PORT}}; - - server_name preview.*; - - # CS184 requires uploads of up to 4MB for submitting screenshots. - # CMS requires larger value for course assest, values provided - # via hiera. - client_max_body_size 4M; - - rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; - - location @proxy_to_lms-preview_app { - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $http_host; - - proxy_redirect off; - proxy_pass http://lms-preview-backend; - } - - location / { - - {% include "basic-auth.j2" %} - try_files $uri @proxy_to_lms-preview_app; - } - - # No basic auth security on the github_service_hook url, so that github can use it for cms - location /github_service_hook { - try_files $uri @proxy_to_lms-preview_app; - } - - # No basic auth security on the heartbeat url, so that ELB can use it - location /heartbeat { - try_files $uri @proxy_to_lms-preview_app; - } - - # Check security on this - location ~ /static/(?P.*) { - root {{ edxapp_data_dir}}; - try_files /staticfiles/$file /course_static/$file =404; - - # return a 403 for static files that shouldn't be - # in the staticfiles directory - location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) { - return 403; - } - # Set django-pipelined files to maximum cache time - location ~ "/static/(?P.*\.[0-9a-f]{12}\..*)" { - expires max; - # Without this try_files, files that have been run through - # django-pipeline return 404s - try_files /staticfiles/$collected /course_static/$collected =404; - } - - # Expire other static files immediately (there should be very few / none of these) - expires epoch; - } - - # Forward to HTTPS if we're an HTTP request... - if ($http_x_forwarded_proto = "http") { - set $do_redirect "true"; - } - - # Run our actual redirect... - if ($do_redirect = "true") { - rewrite ^ https://$host$request_uri? 
permanent; - } -} diff --git a/playbooks/roles/nginx/templates/lms.j2 b/playbooks/roles/nginx/templates/lms.j2 deleted file mode 100644 index fcea1964fd5..00000000000 --- a/playbooks/roles/nginx/templates/lms.j2 +++ /dev/null @@ -1,102 +0,0 @@ -{%- if "lms" in nginx_default_sites -%} - {%- set default_site = "default" -%} -{%- else -%} - {%- set default_site = "" -%} -{%- endif -%} - -upstream lms-backend { - {% for host in nginx_lms_gunicorn_hosts %} - server {{ host }}:{{ edxapp_lms_gunicorn_port }} fail_timeout=0; - {% endfor %} -} - -server { - # LMS configuration file for nginx, templated by ansible - - {% if NGINX_ENABLE_SSL %} - - listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}}; - listen {{EDXAPP_LMS_SSL_NGINX_PORT}} {{default_site}} ssl; - - ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; - ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; - - {% else %} - listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}}; - {% endif %} - - access_log {{ nginx_log_dir }}/access.log; - error_log {{ nginx_log_dir }}/error.log error; - - # CS184 requires uploads of up to 4MB for submitting screenshots. - # CMS requires larger value for course assest, values provided - # via hiera. - client_max_body_size 4M; - - rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; - - location @proxy_to_lms_app { - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $http_host; - - proxy_redirect off; - proxy_pass http://lms-backend; - } - - location / { - {% include "basic-auth.j2" %} - try_files $uri @proxy_to_lms_app; - } - - # No basic auth security on the github_service_hook url, so that github can use it for cms - location /github_service_hook { - try_files $uri @proxy_to_lms_app; - } - - # No basic auth security on the heartbeat url, so that ELB can use it - location /heartbeat { - try_files $uri @proxy_to_lms_app; - } - - # Check security on this - location ~ /static/(?P.*) { - root {{ edxapp_data_dir }}; - try_files /staticfiles/$file /course_static/$file =404; - - # return a 403 for static files that shouldn't be - # in the staticfiles directory - location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) { - return 403; - } - - # http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default - location ~ "/static/(?P.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" { - expires max; - add_header Access-Control-Allow-Origin *; - try_files /staticfiles/$collected /course_static/$collected =404; - } - - # Set django-pipelined files to maximum cache time - location ~ "/static/(?P.*\.[0-9a-f]{12}\..*)" { - expires max; - # Without this try_files, files that have been run through - # django-pipeline return 404s - try_files /staticfiles/$collected /course_static/$collected =404; - } - - # Expire other static files immediately (there should be very few / none of these) - expires epoch; - } - - # Forward to HTTPS if we're an HTTP request... - if ($http_x_forwarded_proto = "http") { - set $do_redirect "true"; - } - - # Run our actual redirect... - if ($do_redirect = "true") { - rewrite ^ https://$host$request_uri? 
permanent; - } -} diff --git a/playbooks/roles/nginx/templates/nginx.conf.j2 b/playbooks/roles/nginx/templates/nginx.conf.j2 deleted file mode 100644 index 1633d66d1ea..00000000000 --- a/playbooks/roles/nginx/templates/nginx.conf.j2 +++ /dev/null @@ -1,68 +0,0 @@ -user www-data; -worker_processes 4; -pid /var/run/nginx.pid; - -events { - worker_connections 768; - # multi_accept on; -} - -http { - - ## - # Basic Settings - ## - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - # increase header buffer for for https://edx-wiki.atlassian.net/browse/LMS-467> - # see http://orensol.com/2009/01/18/nginx-and-weird-400-bad-request-responses/ - large_client_header_buffers 4 16k; - # server_tokens off; - - # server_names_hash_bucket_size 64; - # server_name_in_redirect off; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - ## - # Logging Settings - ## - - log_format p_combined '$http_x_forwarded_for - $remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent $request_time ' - '"$http_referer" "$http_user_agent"'; - - log_format ssl_combined '$remote_addr - $ssl_client_s_dn - "$upstream_addr" [$time_local] ' - '"$request" $status $body_bytes_sent $request_time ' - '"$http_referer" "$http_user_agent"'; - - access_log {{ nginx_log_dir }}/access.log p_combined; - error_log {{ nginx_log_dir }}/error.log; - - ## - # Gzip Settings - ## - - gzip on; - gzip_disable "msie6"; - - gzip_vary on; - gzip_proxied any; - gzip_comp_level 6; - gzip_buffers 16 8k; - gzip_http_version 1.1; - gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; - - ## - # Virtual Host Configs - ## - - include {{ nginx_sites_enabled_dir }}/*; - include {{ nginx_conf_dir }}/*.conf; -} - diff --git a/playbooks/roles/nginx/templates/ora.j2 b/playbooks/roles/nginx/templates/ora.j2 deleted file mode 100644 index 9a90a1545d8..00000000000 --- a/playbooks/roles/nginx/templates/ora.j2 +++ /dev/null @@ -1,44 +0,0 @@ -upstream app_server { - {% for host in nginx_ora_gunicorn_hosts %} - server {{ host }}:{{ ora_gunicorn_port }} fail_timeout=0; - {% endfor %} -} - -server { - listen {{ ORA_NGINX_PORT }} default_server; - - location / { - - {% include "basic-auth.j2" %} - try_files $uri @proxy_to_app; - } - - # Check security on this - location /static/ { - alias /opt/wwc/staticfiles/; - - # return a 403 for static files that shouldn't be - # in the staticfiles directory - location ~ ^/static/(.*)(\.xml|\.json|README.TXT) { - return 403; - } - # Set django-pipelined files to maximum cache time - location ~ /static/.*\.[0-9a-f]+\..* { - expires max; - } - - # Expire other static files immediately (there should be very few / none of these) - expires epoch; - } - - location @proxy_to_app { - client_max_body_size 75K; - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $http_host; - - proxy_redirect off; - proxy_pass http://app_server; - } -} diff --git a/playbooks/roles/nginx/templates/xqueue.j2 b/playbooks/roles/nginx/templates/xqueue.j2 deleted file mode 100644 index 95379766b32..00000000000 --- a/playbooks/roles/nginx/templates/xqueue.j2 +++ /dev/null @@ -1,30 +0,0 @@ -upstream xqueue_app_server { - {% for host in nginx_xqueue_gunicorn_hosts %} - server {{ host }}:{{ xqueue_gunicorn_port }} fail_timeout=0; - {% 
endfor %} -} - -server { - listen {{ XQUEUE_NGINX_PORT }} default_server; - - location / { - {% include "basic-auth.j2" %} - try_files $uri @proxy_to_app; - } - - # No basic auth security on the heartbeat url, so that ELB can use it - location /xqueue/status/{ - try_files $uri @proxy_to_app; - } - -location @proxy_to_app { - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $http_host; - - proxy_redirect off; - proxy_pass http://xqueue_app_server; - } -} - diff --git a/playbooks/roles/nginx/templates/xserver.j2 b/playbooks/roles/nginx/templates/xserver.j2 deleted file mode 100644 index 79562948dfd..00000000000 --- a/playbooks/roles/nginx/templates/xserver.j2 +++ /dev/null @@ -1,35 +0,0 @@ -# of Nginx configuration files in order to fully unleash the power of Nginx. -# http://wiki.nginx.org/Pitfalls -# http://wiki.nginx.org/QuickStart -# http://wiki.nginx.org/Configuration -# -# Generally, you will want to move this file somewhere, and start with a clean -# file but keep this around for reference. Or just disable in sites-enabled. -# -# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples. -## -upstream xserver_app_server { - {% for host in nginx_xserver_gunicorn_hosts %} - server {{ host }}:{{ xserver_gunicorn_port }} fail_timeout=0; - {% endfor %} -} - -server { - listen {{ XSERVER_NGINX_PORT }} default_server; - - location / { - {% include "basic-auth.j2" %} - try_files $uri @proxy_to_app; - } - - -location @proxy_to_app { - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - proxy_set_header Host $http_host; - - proxy_redirect off; - proxy_pass http://xserver_app_server; - } -} diff --git a/playbooks/roles/nltk/defaults/main.yml b/playbooks/roles/nltk/defaults/main.yml new file mode 100644 index 00000000000..9e439d6acc3 --- /dev/null +++ b/playbooks/roles/nltk/defaults/main.yml @@ -0,0 +1,15 @@ +--- +NLTK_DATA_DIR: "/usr/local/share/nltk_data" + +# Once the file is downloaded, it won't be downloaded again, +# so if you need to version the data files, you should upload +# your own version of the files with the version appended to the filename. 
+NLTK_DATA: + - { path: "taggers/maxent_treebank_pos_tagger", + url: "http://nltk.github.io/nltk_data/packages/taggers/maxent_treebank_pos_tagger.zip" } + - { path: "corpora/stopwords", + url: "http://nltk.github.io/nltk_data/packages/corpora/stopwords.zip" } + - { path: "corpora/wordnet", + url: "http://nltk.github.io/nltk_data/packages/corpora/wordnet.zip" } + +NLTK_DOWNLOAD_TIMEOUT: 100 diff --git a/playbooks/roles/nltk/tasks/main.yml b/playbooks/roles/nltk/tasks/main.yml new file mode 100644 index 00000000000..9681c67f10e --- /dev/null +++ b/playbooks/roles/nltk/tasks/main.yml @@ -0,0 +1,33 @@ +--- + +- name: Install unzip + apt: pkg=unzip state=present update_cache=yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + +- name: create the nltk data directory and subdirectories + file: path={{ NLTK_DATA_DIR }}/{{ item.path|dirname }} state=directory + with_items: "{{ NLTK_DATA }}" + tags: + - deploy + +- name: download nltk data + get_url: + dest: "{{ NLTK_DATA_DIR }}/{{ item.url|basename }}" + url: "{{ item.url }}" + timeout: "{{ NLTK_DOWNLOAD_TIMEOUT }}" + with_items: "{{ NLTK_DATA }}" + register: nltk_download + tags: + - deploy + +- name: unarchive nltk data + shell: "unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }}" + args: + chdir: "{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}" + with_items: "{{ NLTK_DATA }}" + when: nltk_download is changed + tags: + - deploy diff --git a/playbooks/roles/notifier/defaults/main.yml b/playbooks/roles/notifier/defaults/main.yml deleted file mode 100644 index ab6a42f7426..00000000000 --- a/playbooks/roles/notifier/defaults/main.yml +++ /dev/null @@ -1,100 +0,0 @@ ---- - -NOTIFIER_USER: "notifier" -NOTIFIER_WEB_USER: "www-data" -NOTIFIER_HOME: "{{ COMMON_APP_DIR }}/notifier" -NOTIFIER_VENV_DIR: "{{ NOTIFIER_HOME }}/virtualenvs/notifier" -NOTIFIER_DB_DIR: "{{ NOTIFIER_HOME }}/db" -NOTIFIER_SOURCE_REPO: "https://github.com/edx/notifier.git" -NOTIFIER_CODE_DIR: "{{ NOTIFIER_HOME }}/src" -NOTIFIER_VERSION: "master" -NOTIFIER_GIT_IDENTITY_PATH: "{{ secure_dir }}/files/git-identity" -NOTIFIER_REQUIREMENTS_FILE: "{{ NOTIFIER_CODE_DIR }}/requirements.txt" -NOTIFIER_LOG_LEVEL: "INFO" -NOTIFIER_RSYSLOG_ENABLED: "yes" -NOTIFIER_DIGEST_TASK_INTERVAL: "1440" - -NOTIFIER_DIGEST_EMAIL_SENDER: "notifications@example.com" -NOTIFIER_DIGEST_EMAIL_SUBJECT: "Daily Discussion Digest" -NOTIFIER_DIGEST_EMAIL_TITLE: "Discussion Digest" -NOTIFIER_DIGEST_EMAIL_DESCRIPTION: "A digest of unread content from course discussions you are following." 
-NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS: "" - -NOTIFIER_LANGUAGE: "" - -NOTIFIER_ENV: "Development" - -NOTIFIER_EMAIL_BACKEND: "console" -NOTIFIER_EMAIL_HOST: "localhost" -NOTIFIER_EMAIL_PORT: 25 -NOTIFIER_EMAIL_USER: "" -NOTIFIER_EMAIL_PASS: "" -NOTIFIER_EMAIL_USE_TLS: "False" - -NOTIFIER_EMAIL_REWRITE_RECIPIENT: "" - -NOTIFIER_LMS_URL_BASE: "http://localhost:8000/" -NOTIFIER_LMS_SECRET_KEY: "PUT_YOUR_SECRET_KEY_HERE" - -NOTIFIER_COMMENT_SERVICE_BASE: "http://localhost:4567/" -NOTIFIER_COMMENT_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE" - -NOTIFIER_USER_SERVICE_BASE: "http://localhost:8000/" -NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE" -NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: !!null -NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: !!null -NOTIFIER_CELERY_BROKER_URL: "django://" -NOTIFIER_LOGO_IMAGE_URL: "{{ NOTIFIER_LMS_URL_BASE }}/static/images/header-logo.png" -NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor" - -NOTIFER_REQUESTS_CA_BUNDLE: "/etc/ssl/certs/ca-certificates.crt" - -NOTIFIER_DD_API_KEY: "NOT_USED" # data dog - -notifier_debian_pkgs: - - apparmor-utils - - build-essential - - curl - - g++ - - gcc - - ipython - - pkg-config - - rsyslog - -# -# This structure is iterated over in order to build both -# the environment file for the notifier which is sourced -# into the applications's environment and for building -# the env variable for the supervisor job definition. -# -notifier_env_vars: - FORUM_DIGEST_EMAIL_SENDER: $NOTIFIER_DIGEST_EMAIL_SENDER - FORUM_DIGEST_EMAIL_SUBJECT: $NOTIFIER_DIGEST_EMAIL_SUBJECT - FORUM_DIGEST_EMAIL_TITLE: $NOTIFIER_DIGEST_EMAIL_TITLE - FORUM_DIGEST_EMAIL_DESCRIPTION: $NOTIFIER_DIGEST_EMAIL_DESCRIPTION - EMAIL_SENDER_POSTAL_ADDRESS: $NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS - NOTIFIER_LANGUAGE: $NOTIFIER_LANGUAGE - NOTIFIER_ENV: $NOTIFIER_ENV - NOTIFIER_DB_DIR: $NOTIFIER_DB_DIR - EMAIL_BACKEND: $NOTIFIER_EMAIL_BACKEND - EMAIL_HOST: $NOTIFIER_EMAIL_HOST - EMAIL_PORT: $NOTIFIER_EMAIL_PORT - EMAIL_HOST_USER: $NOTIFIER_EMAIL_USER - EMAIL_HOST_PASSWORD: $NOTIFIER_EMAIL_PASS - EMAIL_USE_TLS: $NOTIFIER_EMAIL_USE_TLS - EMAIL_REWRITE_RECIPIENT: $NOTIFIER_EMAIL_REWRITE_RECIPIENT - LMS_URL_BASE: $NOTIFIER_LMS_URL_BASE - SECRET_KEY: $NOTIFIER_LMS_SECRET_KEY - CS_URL_BASE: $NOTIFIER_COMMENT_SERVICE_BASE - CS_API_KEY: $NOTIFIER_COMMENT_SERVICE_API_KEY - US_URL_BASE: $NOTIFIER_USER_SERVICE_BASE - US_API_KEY: $NOTIFIER_USER_SERVICE_API_KEY - DATADOG_API_KEY: $NOTIFIER_DD_API_KEY - LOG_LEVEL: $NOTIFIER_LOG_LEVEL - RSYSLOG_ENABLED: $NOTIFIER_RSYSLOG_ENABLED - BROKER_URL: $NOTIFIER_CELERY_BROKER_URL - REQUESTS_CA_BUNDLE: $NOTIFER_REQUESTS_CA_BUNDLE - US_HTTP_AUTH_USER: $NOTIFIER_USER_SERVICE_HTTP_AUTH_USER - US_HTTP_AUTH_PASS: $NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS - FORUM_DIGEST_TASK_INTERVAL: $NOTIFIER_DIGEST_TASK_INTERVAL - LOGO_IMAGE_URL: $NOTIFIER_LOGO_IMAGE_URL diff --git a/playbooks/roles/notifier/handlers/main.yml b/playbooks/roles/notifier/handlers/main.yml deleted file mode 100644 index bb98f6ea510..00000000000 --- a/playbooks/roles/notifier/handlers/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: restart notifier-scheduler - supervisorctl_local: > - name=notifier-scheduler - state=restarted - config={{ supervisor_cfg }} - supervisorctl_path={{ supervisor_ctl }} - -- name: restart notifier-celery-workers - supervisorctl_local: > - name=notifier-celery-workers - state=restarted - config={{ supervisor_cfg }} - supervisorctl_path={{ supervisor_ctl }} diff --git a/playbooks/roles/notifier/meta/main.yml 
b/playbooks/roles/notifier/meta/main.yml deleted file mode 100644 index 107f1e98c29..00000000000 --- a/playbooks/roles/notifier/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - supervisor diff --git a/playbooks/roles/notifier/tasks/deploy.yml b/playbooks/roles/notifier/tasks/deploy.yml deleted file mode 100644 index 7fbc32e3151..00000000000 --- a/playbooks/roles/notifier/tasks/deploy.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- - -- name: checkout code - git: - dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }} - version={{ NOTIFIER_VERSION }} - sudo: true - sudo_user: "{{ NOTIFIER_USER }}" - notify: - - restart notifier-scheduler - - restart notifier-celery-workers - -- name: source repo group perms - file: - path={{ NOTIFIER_SOURCE_REPO }} mode=2775 state=directory - -- name: install application requirements - pip: - requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}" - virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present - sudo: true - sudo_user: "{{ NOTIFIER_USER }}" - notify: - - restart notifier-scheduler - - restart notifier-celery-workers - -# Syncdb for whatever reason always creates the file owned by www-data:www-data, and then -# complains it can't write because it's running as notifier. So this is to touch the file into -# place with proper perms first. -- name: fix permissions on notifer db file - file: > - path={{ NOTIFIER_DB_DIR }}/notifier.db state=touch owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }} - mode=0664 - sudo: true - notify: - - restart notifier-scheduler - - restart notifier-celery-workers - tags: - - deploy - -- name: syncdb - shell: > - cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb - sudo: true - sudo_user: "{{ NOTIFIER_USER }}" - environment: notifier_env_vars - notify: - - restart notifier-scheduler - - restart notifier-celery-workers diff --git a/playbooks/roles/notifier/tasks/main.yml b/playbooks/roles/notifier/tasks/main.yml deleted file mode 100644 index 4e391e15ab3..00000000000 --- a/playbooks/roles/notifier/tasks/main.yml +++ /dev/null @@ -1,102 +0,0 @@ ---- - -# -# notifier -# -# Overview: -# -# Provides the edX notifier service, a service for sending -# notifications over messaging protocols. 
-# -# Dependencies: -# -# * common -# -# Example play: -# roles: -# - common -# - notifier -# -- name: install notifier specific system packages - apt: pkg={{','.join(notifier_debian_pkgs)}} state=present - -- name: check if incommon ca is installed - command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt - register: incommon_present - ignore_errors: yes - -- name: create incommon ca directory - file: - path="/usr/share/ca-certificates/incommon" mode=2775 state=directory - when: incommon_present|failed - -- name: retrieve incommon server CA - shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt - when: incommon_present|failed - -- name: add InCommon ca cert - lineinfile: - dest=/etc/ca-certificates.conf - regexp='incommon/InCommonServerCA.crt' - line='incommon/InCommonServerCA.crt' - -- name: update ca certs globally - shell: update-ca-certificates - -- name: create notifier user {{ NOTIFIER_USER }} - user: - name={{ NOTIFIER_USER }} state=present shell=/bin/bash - home={{ NOTIFIER_HOME }} createhome=yes - -- name: setup the notifier env - template: - src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env - owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}" - -- name: drop a bash_profile - copy: > - src=../../common/files/bash_profile - dest={{ NOTIFIER_HOME }}/.bash_profile - owner={{ NOTIFIER_USER }} - group={{ NOTIFIER_USER }} - -- name: ensure .bashrc exists - shell: touch {{ NOTIFIER_HOME }}/.bashrc - sudo: true - sudo_user: "{{ NOTIFIER_USER }}" - -- name: add source of notifier_env to .bashrc - lineinfile: - dest={{ NOTIFIER_HOME }}/.bashrc - regexp='. {{ NOTIFIER_HOME }}/notifier_env' - line='. {{ NOTIFIER_HOME }}/notifier_env' - -- name: add source venv to .bashrc - lineinfile: - dest={{ NOTIFIER_HOME }}/.bashrc - regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate' - line='. 
{{ NOTIFIER_VENV_DIR }}/bin/activate' - -- name: create notifier DB directory - file: - path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }} - -- name: create notifier/bin directory - file: - path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }} - -- name: supervisord config for celery workers - template: > - src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 - dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf" - sudo_user: "{{ supervisor_user }}" - notify: restart notifier-celery-workers - -- name: supervisord config for scheduler - template: > - src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2 - dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf" - sudo_user: "{{ supervisor_user }}" - notify: restart notifier-scheduler - -- include: deploy.yml tags=deploy diff --git a/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 b/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 deleted file mode 100644 index 5754b4cb6a4..00000000000 --- a/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 +++ /dev/null @@ -1,31 +0,0 @@ -; -; {{ ansible_managed }} -; -[program:notifier-celery-workers] - -command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py celery worker -l {{ NOTIFIER_LOG_LEVEL }} - -process_name=%(program_name)s -numprocs=1 -directory={{ NOTIFIER_CODE_DIR }} -umask=022 -autostart=true -autorestart=true -startsecs=10 -startretries=3 -exitcodes=0,2 -stopsignal=TERM -stopwaitsecs=10 -redirect_stderr=false -stdout_logfile={{NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-celery-workers-stdout.log -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=10 -stdout_capture_maxbytes=1MB -stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-celery-workers-stderr.log -stderr_logfile_maxbytes=1MB -stderr_logfile_backups=10 -stderr_capture_maxbytes=1MB -environment=PID='/var/tmp/notifier-celery-workers.pid',LANG=en_US.UTF-8, -{%- for name,value in notifier_env_vars.items() -%} -{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%} -{%- endfor -%} diff --git a/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-scheduler.conf.j2 b/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-scheduler.conf.j2 deleted file mode 100644 index 7db6e5b963a..00000000000 --- a/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-scheduler.conf.j2 +++ /dev/null @@ -1,31 +0,0 @@ -; -; {{ ansible_managed }} -; -[program:notifier-scheduler] - -command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py scheduler - -process_name=%(program_name)s -numprocs=1 -directory={{ NOTIFIER_CODE_DIR }} -umask=022 -autostart=true -autorestart=true -startsecs=10 -startretries=3 -exitcodes=0,2 -stopsignal=TERM -stopwaitsecs=10 -redirect_stderr=false -stdout_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stdout.log -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=10 -stdout_capture_maxbytes=1MB -stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stderr.log -stderr_logfile_maxbytes=1MB -stderr_logfile_backups=10 -stderr_capture_maxbytes=1MB -environment=PID='/var/tmp/notifier-scheduler.pid',LANG=en_US.UTF-8, -{%- for name,value in notifier_env_vars.items() -%} -{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%} -{%- endfor -%} diff --git 
a/playbooks/roles/notifier/templates/notifier_env.j2 b/playbooks/roles/notifier/templates/notifier_env.j2 deleted file mode 100644 index 283cf98cea4..00000000000 --- a/playbooks/roles/notifier/templates/notifier_env.j2 +++ /dev/null @@ -1,7 +0,0 @@ -# {{ ansible_managed }} - -{% for name,value in notifier_env_vars.items() %} -{% if value %} -export {{ name }}="{{ value }}" -{% endif %} -{% endfor %} diff --git a/playbooks/roles/oauth2_proxy/defaults/main.yml b/playbooks/roles/oauth2_proxy/defaults/main.yml new file mode 100644 index 00000000000..5a025b0774b --- /dev/null +++ b/playbooks/roles/oauth2_proxy/defaults/main.yml @@ -0,0 +1,60 @@ +--- +oauth2_proxy_app_dir: "{{ COMMON_APP_DIR }}/oauth2_proxy" +oauth2_proxy_conf_dir: "{{ COMMON_CFG_DIR }}/oauth2_proxy" + +oauth2_proxy_user: "oauth2_proxy" + +# We define this tuple here separately because we need to know it for downloading the right tarball. Given that they +# bake in both the version number -- which doesn't always match the actual Git tag they release off -- and the Go version, +# it's nearly impossible to use only `OAUTH2_PROXY_VERSION` to build a valid URL. +OAUTH2_PROXY_VERSION: "2.2.0" +oauth2_proxy_version_tuple: "2.2.0.linux-amd64.go1.8.1" +oauth2_proxy_pkg_name: "oauth2_proxy-{{ oauth2_proxy_version_tuple }}" +oauth2_proxy_release_url: "https://github.com/bitly/oauth2_proxy/releases/download/v2.2/%7B%7B%20oauth2_proxy_pkg_name%20%7D%7D.tar.gz" +oauth2_proxy_release_sha256: "1c16698ed0c85aa47aeb80e608f723835d9d1a8b98bd9ae36a514826b3acce56" + +oauth2_proxy_listen_port: 4180 +oauth2_proxy_listen_addr: "0.0.0.0" +oauth2_proxy_upstreams: ["localhost:80"] # List of address:port values acting as upstreams/backends. +oauth2_proxy_request_logging: true +oauth2_proxy_pass_basic_auth: true # Pass Basic Authorization header to upstream(s). +oauth2_proxy_pass_user_headers: true # Passes X-Forwarded-User and X-Forwarded-Email to upstream(s). +oauth2_proxy_pass_host_header: true # Pass original Host header to upstream(s). If false, Host header will come from upstream address. +oauth2_proxy_pass_access_token: true # Pass OAuth access token via X-Forwarded-Access-Token header to upstream(s). +oauth2_proxy_email_domains: ["example.com"] # Which e-mail domains, if any, to validate for. Needed for things like validating a specific G Suite apps domain, etc. +oauth2_proxy_provider: "google" # OAuth provider type. +oauth2_proxy_client_id: "CHANGEME-OAUTH2-CLIENT-ID" # OAuth client ID. +oauth2_proxy_client_secret: "CHANGEME-OAUTH2-CLIENT-SECRET" # OAuth client secret. +oauth2_proxy_custom_templates_dir: "" # Directory having template overrides for the login/error pages. +oauth2_proxy_cookie_name: "_oauth2_proxy" # Client-side browser cookie name. +oauth2_proxy_cookie_secret: "CHANGEME-COOKIE-SECRET" # Cookie encryption secret. +oauth2_proxy_cookie_domain: "example.com" # Domain pattern for this cookie. +oauth2_proxy_cookie_expire: "168h" # How long before the cookie expires. (168h = 7 days) +oauth2_proxy_cookie_refresh: "4h" # How long since cookie issuance (and since last refresh) to validate existing OAuth token. +oauth2_proxy_cookie_secure: true # Whether or not cookie is HTTPS only. +oauth2_proxy_cookie_httponly: true # Whether or not cookie is browser-only (i.e. 
Javascript can't access it) + +oauth2_proxy_services: + - { service: "oauth2_proxy", host: "localhost", port: "{{ oauth2_proxy_listen_port }}" } + +oauth2_proxy_config: + http_address: "{{ oauth2_proxy_listen_addr }}:{{ oauth2_proxy_listen_port }}" + upstreams: "{{ oauth2_proxy_upstreams }}" + request_logging: "{{ oauth2_proxy_request_logging }}" + pass_basic_auth: "{{ oauth2_proxy_pass_basic_auth }}" + pass_user_headers: "{{ oauth2_proxy_pass_user_headers }}" + pass_host_header: "{{ oauth2_proxy_pass_host_header }}" + pass_access_token: "{{ oauth2_proxy_pass_access_token }}" + email_domains: "{{ oauth2_proxy_email_domains }}" + provider: "{{ oauth2_proxy_provider }}" + client_id: "{{ oauth2_proxy_client_id }}" + client_secret: "{{ oauth2_proxy_client_secret }}" + custom_templates_dir: "{{ oauth2_proxy_custom_templates_dir }}" + cookie_name: "{{ oauth2_proxy_cookie_name }}" + cookie_secret: "{{ oauth2_proxy_cookie_secret }}" + cookie_domain: "{{ oauth2_proxy_cookie_domain }}" + cookie_expire: "{{ oauth2_proxy_cookie_expire }}" + cookie_refresh: "{{ oauth2_proxy_cookie_refresh }}" + cookie_secure: "{{ oauth2_proxy_cookie_secure }}" + cookie_httponly: "{{ oauth2_proxy_cookie_httponly }}" + diff --git a/playbooks/roles/oauth2_proxy/meta/main.yml b/playbooks/roles/oauth2_proxy/meta/main.yml new file mode 100644 index 00000000000..df97e0e45f1 --- /dev/null +++ b/playbooks/roles/oauth2_proxy/meta/main.yml @@ -0,0 +1,7 @@ +--- +dependencies: + - role: common + tags: + - always # We want to make sure the role always runs, otherwise the system isn't in a state to install Python/Supervisord. + - config-encoders + - supervisor diff --git a/playbooks/roles/oauth2_proxy/tasks/deploy.yml b/playbooks/roles/oauth2_proxy/tasks/deploy.yml new file mode 100644 index 00000000000..1aa95123973 --- /dev/null +++ b/playbooks/roles/oauth2_proxy/tasks/deploy.yml @@ -0,0 +1,95 @@ +--- +- name: create the supervisor config + template: + src: oauth2_proxy_supervisor.conf.j2 + dest: "{{ supervisor_available_dir }}/oauth2_proxy.conf" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_user }}" + mode: 0644 + become_user: "{{ supervisor_user }}" + register: oauth2_proxy_supervisor + tags: + - install + - install:configuration + +- name: enable the supervisor config + file: + src: "{{ supervisor_available_dir }}/oauth2_proxy.conf" + dest: "{{ supervisor_cfg_dir }}/oauth2_proxy.conf" + owner: "{{ supervisor_user }}" + state: link + force: yes + mode: 0644 + become_user: "{{ supervisor_user }}" + register: oauth2_proxy_supervisor + tags: + - install + - install:configuration + +- name: download oauth2_proxy release + get_url: + url: "{{ oauth2_proxy_release_url }}" + dest: "/tmp/oauth2_proxy.tar.gz" + force: yes + sha256sum: "{{ oauth2_proxy_release_sha256 }}" + tags: + - install + - install:configuration + +- name: extract the oauth2_proxy release + unarchive: + src: "/tmp/oauth2_proxy.tar.gz" + dest: "/tmp" + remote_src: True + tags: + - install + - install:configuration + +- name: move the oauth2_proxy binary into place + command: "mv /tmp/{{ oauth2_proxy_pkg_name }}/oauth2_proxy {{ oauth2_proxy_app_dir }}/" + tags: + - install + - install:configuration + +- name: update oauth2_proxy configuration + template: + src: oauth2_proxy.cfg.j2 + dest: "{{ oauth2_proxy_conf_dir }}/oauth2_proxy.cfg" + owner: "{{ oauth2_proxy_user }}" + group: "{{ common_web_group }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} 
update" + register: supervisor_update + changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" + when: not disable_edx_services + tags: + - manage + - manage:start + - manage:update + +- name: ensure oauth2_proxy is started + supervisorctl: + name: oauth2_proxy + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + tags: + - manage + - manage:start + +- include: test.yml + tags: + - deploy + +- include: tag_ec2.yml + when: COMMON_TAG_EC2_INSTANCE + tags: + - deploy + +- set_fact: + oauth2_proxy_installed: true diff --git a/playbooks/roles/oauth2_proxy/tasks/main.yml b/playbooks/roles/oauth2_proxy/tasks/main.yml new file mode 100644 index 00000000000..6c04a941186 --- /dev/null +++ b/playbooks/roles/oauth2_proxy/tasks/main.yml @@ -0,0 +1,41 @@ +--- +# oauth2_proxy +# +# Dependencies: +# +# * common + +- name: create application user + user: + name: "{{ oauth2_proxy_user }}" + home: "{{ oauth2_proxy_app_dir }}" + createhome: yes + shell: /bin/false + generate_ssh_key: yes + tags: + - install + - install:base + +- name: set oauth2_proxy app dir permissions + file: + path: "{{ oauth2_proxy_app_dir }}" + state: directory + owner: "{{ oauth2_proxy_user }}" + group: "{{ common_web_group }}" + tags: + - install + - install:base + +- name: set oauth2_proxy conf dir permissions + file: + path: "{{ oauth2_proxy_conf_dir }}" + state: directory + owner: "{{ oauth2_proxy_user }}" + group: "{{ common_web_group }}" + tags: + - install + - install:base + +- include: deploy.yml + tags: + - deploy diff --git a/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml b/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml new file mode 100644 index 00000000000..2dbeb67ae1c --- /dev/null +++ b/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml @@ -0,0 +1,10 @@ +--- +- name: get instance information + action: ec2_metadata_facts + +- name: tag instance + ec2_tag: + resource: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + tags: + "version:oauth2_proxy" : "{{ OAUTH2_PROXY_VERSION }} {{ oauth2_proxy_release_sha256 }}" diff --git a/playbooks/roles/oauth2_proxy/tasks/test.yml b/playbooks/roles/oauth2_proxy/tasks/test.yml new file mode 100644 index 00000000000..525db0b759a --- /dev/null +++ b/playbooks/roles/oauth2_proxy/tasks/test.yml @@ -0,0 +1,7 @@ +--- +- name: test that the required service are listening + wait_for: + port: "{{ item.port }}" + host: "{{ item.host }}" + timeout: 30 + with_items: "{{ oauth2_proxy_services }}" diff --git a/playbooks/roles/oauth2_proxy/templates/oauth2_proxy.cfg.j2 b/playbooks/roles/oauth2_proxy/templates/oauth2_proxy.cfg.j2 new file mode 100644 index 00000000000..5344a1697d3 --- /dev/null +++ b/playbooks/roles/oauth2_proxy/templates/oauth2_proxy.cfg.j2 @@ -0,0 +1 @@ +{{ oauth2_proxy_config | encode_toml }} diff --git a/playbooks/roles/oauth2_proxy/templates/oauth2_proxy_supervisor.conf.j2 b/playbooks/roles/oauth2_proxy/templates/oauth2_proxy_supervisor.conf.j2 new file mode 100644 index 00000000000..fdb384703e5 --- /dev/null +++ b/playbooks/roles/oauth2_proxy/templates/oauth2_proxy_supervisor.conf.j2 @@ -0,0 +1,7 @@ +[program:oauth2_proxy] +command={{ oauth2_proxy_app_dir }}/oauth2_proxy -config {{ oauth2_proxy_conf_dir }}/oauth2_proxy.cfg +priority=999 +user={{ oauth2_proxy_user }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +stopsignal=QUIT diff --git a/playbooks/roles/oauth_client_setup/defaults/main.yml 
b/playbooks/roles/oauth_client_setup/defaults/main.yml new file mode 100644 index 00000000000..1d2fa3a3002 --- /dev/null +++ b/playbooks/roles/oauth_client_setup/defaults/main.yml @@ -0,0 +1,151 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role oauth_client_setup +# + +# +# vars are namespaced with the module name. +# +# +oauth_client_setup_role_name: oauth_client_setup + +oauth_client_setup_oauth2_clients: + - { + name: "{{ ecommerce_service_name | default('None') }}", + url_root: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT | default('None') }}", + sso_id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ ECOMMERCE_LOGOUT_URL | default('None') }}", + username: "{{ ECOMMERCE_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ EDXAPP_CMS_SERVICE_NAME | default('None') }}", + url_root: "{{ EDXAPP_CMS_URL_ROOT | default('None') }}", + sso_id: "{{ EDXAPP_CMS_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ EDXAPP_CMS_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ EDXAPP_CMS_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ EDXAPP_CMS_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ EDXAPP_CMS_LOGOUT_URL | default('None') }}", + username: "{{ EDXAPP_CMS_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ INSIGHTS_OAUTH2_APP_CLIENT_NAME | default('None') }}", + url_root: "{{ INSIGHTS_BASE_URL | default('None') }}", + id: "{{ INSIGHTS_OAUTH2_KEY | default('None') }}", + secret: "{{ INSIGHTS_OAUTH2_SECRET | default('None') }}", + sso_id: "{{ INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ INSIGHTS_LOGOUT_URL | default('None') }}", + username: "{{ INSIGHTS_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ credentials_service_name | default('None') }}", + url_root: "{{ CREDENTIALS_URL_ROOT | default('None') }}", + sso_id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ CREDENTIALS_LOGOUT_URL | default('None') }}", + username: "{{ CREDENTIALS_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ discovery_service_name | default('None') }}", + url_root: "{{ DISCOVERY_URL_ROOT | default('None') }}", + sso_id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ 
DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ DISCOVERY_LOGOUT_URL | default('None') }}", + username: "{{ DISCOVERY_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ veda_web_frontend_service_name | default('None') }}", + url_root: "{{ VEDA_WEB_FRONTEND_OAUTH2_URL | default('None') }}", + logout_uri: "{{ VEDA_WEB_FRONTEND_LOGOUT_URL | default('None') }}", + username: "{{ EDXAPP_VEDA_SERVICE_USER_NAME | default('None') }}" + } + - { + name: "{{ registrar_service_name | default('None') }}", + url_root: "{{ REGISTRAR_URL_ROOT | default('None') }}", + sso_id: "{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ REGISTRAR_LOGOUT_URL | default('None') }}", + username: "{{ REGISTRAR_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ designer_service_name | default('None') }}", + url_root: "{{ DESIGNER_URL_ROOT | default('None') }}", + sso_id: "{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ DESIGNER_LOGOUT_URL | default('None') }}", + username: "{{ DESIGNER_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ retirement_service_name if COMMON_RETIREMENT_SERVICE_SETUP|default(false)|bool else 'None' }}", + backend_service_id: "{{ RETIREMENT_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ RETIREMENT_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + username: "{{ EDXAPP_RETIREMENT_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ license_manager_service_name | default('None') }}", + url_root: "{{ LICENSE_MANAGER_URL_ROOT | default('None') }}", + sso_id: "{{ LICENSE_MANAGER_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ LICENSE_MANAGER_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ LICENSE_MANAGER_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ LICENSE_MANAGER_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ LICENSE_MANAGER_LOGOUT_URL | default('None') }}", + username: "{{ LICENSE_MANAGER_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ enterprise_catalog_service_name | default('None') }}", + url_root: "{{ ENTERPRISE_CATALOG_URL_ROOT | default('None') }}", + sso_id: "{{ ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ ENTERPRISE_CATALOG_LOGOUT_URL | default('None') }}", + username: "{{ ENTERPRISE_CATALOG_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ commerce_coordinator_service_name | default('None') }}", + url_root: "{{ 
COMMERCE_COORDINATOR_URL_ROOT | default('None') }}", + sso_id: "{{ COMMERCE_COORDINATOR_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ COMMERCE_COORDINATOR_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ COMMERCE_COORDINATOR_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ COMMERCE_COORDINATOR_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ COMMERCE_COORDINATOR_LOGOUT_URL | default('None') }}", + username: "{{ COMMERCE_COORDINATOR_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ edx_exams_service_name | default('None') }}", + url_root: "{{ EDX_EXAMS_URL_ROOT | default('None') }}", + sso_id: "{{ EDX_EXAMS_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ EDX_EXAMS_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ EDX_EXAMS_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ EDX_EXAMS_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ EDX_EXAMS_LOGOUT_URL | default('None') }}", + username: "{{ EDX_EXAMS_SERVICE_USER_NAME | default('None') }}", + } +# +# OS packages +# + +oauth_client_setup_debian_pkgs: [] + +oauth_client_setup_redhat_pkgs: [] diff --git a/playbooks/roles/oauth_client_setup/tasks/main.yml b/playbooks/roles/oauth_client_setup/tasks/main.yml new file mode 100644 index 00000000000..a70a96f0a2d --- /dev/null +++ b/playbooks/roles/oauth_client_setup/tasks/main.yml @@ -0,0 +1,66 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role oauth_client_setup +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: Create OAuth2 django-oauth-toolkit SSO Applications + shell: > + {{ edxapp_venv_bin }}/python {{ COMMON_BIN_DIR }}/manage.edxapp lms --settings={{ COMMON_EDXAPP_SETTINGS }} + create_dot_application + --grant-type authorization-code + --redirect-uris "{{ item.url_root }}/complete/edx-oauth2/" + --client-id {{ item.sso_id }} + --client-secret {{ item.sso_secret }} + --scopes user_id + --skip-authorization + --update + {{ item.name }}-sso + {{ item.username }} + become_user: "{{ edxapp_user }}" + environment: "{{ edxapp_environment }}" + with_items: "{{ oauth_client_setup_oauth2_clients }}" + when: + - item.name != 'None' + - item.sso_id is defined + - item.sso_id != 'None' + - item.sso_secret is defined + - item.sso_secret != 'None' + +- name: Create OAuth2 django-oauth-toolkit Backend Service Applications + shell: > + {{ edxapp_venv_bin }}/python {{ COMMON_BIN_DIR }}/manage.edxapp lms --settings={{ COMMON_EDXAPP_SETTINGS }} + create_dot_application + --grant-type client-credentials + --client-id {{ item.backend_service_id }} + --client-secret {{ item.backend_service_secret }} + --scopes user_id + --update + {{ item.name }}-backend-service + {{ item.username }} + become_user: "{{ edxapp_user }}" + environment: "{{ edxapp_environment }}" + with_items: "{{ oauth_client_setup_oauth2_clients }}" + when: + - item.name != 'None' + - item.backend_service_id is defined + - item.backend_service_id != 'None' + - item.backend_service_secret is defined + - item.backend_service_secret != 'None' diff --git a/playbooks/roles/opensearch/defaults/main.yml 
b/playbooks/roles/opensearch/defaults/main.yml new file mode 100644 index 00000000000..e183b74c6f0 --- /dev/null +++ b/playbooks/roles/opensearch/defaults/main.yml @@ -0,0 +1,17 @@ +--- +opensearch_app_dir: "{{ COMMON_APP_DIR }}/opensearch" +opensearch_data_dir: "{{ COMMON_DATA_DIR }}/opensearch" +opensearch_log_dir: "{{ COMMON_LOG_DIR }}/opensearch" +opensearch_cfg_dir: "{{ COMMON_CFG_DIR }}/opensearch" +opensearch_download_url: "https://artifacts.opensearch.org/releases/bundle/opensearch" +opensearch_user: "opensearch" +opensearch_group: "opensearch" + +# +# Defaults for a single server installation. +OPENSEARCH_CLUSTER_MEMBERS: [] +OPENSEARCH_HEAP_SIZE: "512m" +OPENSEARCH_START_TIMEOUT: "1200" +OPENSEARCH_VERSION: "1.2.0" + +systemctl_path: /etc/systemd/system diff --git a/playbooks/roles/opensearch/meta/main.yml b/playbooks/roles/opensearch/meta/main.yml new file mode 100644 index 00000000000..2083f0e1251 --- /dev/null +++ b/playbooks/roles/opensearch/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/playbooks/roles/opensearch/tasks/main.yml b/playbooks/roles/opensearch/tasks/main.yml new file mode 100644 index 00000000000..53aefa67f97 --- /dev/null +++ b/playbooks/roles/opensearch/tasks/main.yml @@ -0,0 +1,97 @@ +--- +- name: Download opensearch {{ OPENSEARCH_VERSION }} + get_url: + url: "{{ opensearch_download_url }}/{{ OPENSEARCH_VERSION }}/opensearch-{{ OPENSEARCH_VERSION }}-linux-x64.tar.gz" + dest: "/tmp/opensearch.tar.gz" + tags: + - install + - install:base + +- name: Create opensearch user + user: + name: "{{ opensearch_user }}" + state: present + shell: /bin/bash + tags: + - install + - install:base + +- name: Create home directory + file: + path: "{{ item }}" + state: directory + owner: "{{ opensearch_user }}" + group: "{{ opensearch_user }}" + with_items: + - "{{ opensearch_app_dir }}" + - "{{ opensearch_data_dir }}" + - "{{ opensearch_log_dir }}" + - "{{ opensearch_cfg_dir }}" + tags: + - install + - install:base + +- name: Extract the tar file + command: chdir=/tmp/ tar -xvzf opensearch.tar.gz -C "{{ opensearch_app_dir }}" --strip-components=1 + tags: + - install + - install:base + register: opensearch_reinstall + +- name: Copy Configuration File + blockinfile: + block: "{{ lookup('template', 'templates/edx/etc/opensearch/opensearch.yml.j2') }}" + dest: "{{ opensearch_app_dir }}/config/opensearch.yml" + backup: yes + state: present + create: yes + marker: "## {mark} opensearch main configuration ##" + owner: "{{ opensearch_user }}" + group: "{{ opensearch_user }}" + mode: 0600 + tags: + - install + - install:base + +- name: Copy jvm.options File for Instance + template: + src: edx/etc/opensearch/jvm.options.j2 + dest: "{{ opensearch_app_dir }}/config/jvm.options" + owner: "{{ opensearch_user }}" + group: "{{ opensearch_user }}" + mode: 0600 + force: yes + tags: + - install + - install:base + +- name: Create systemd service + template: + src: lib/systemd/system/opensearch.service.j2 + dest: "{{ systemctl_path }}/opensearch.service" + tags: + - install + - install:base + +- name: Make sure opensearch is started + service: + name: opensearch + state: started + enabled: yes + tags: + - install + - install:base + +- name: Get all the installed OpenSearch plugins + command: "/edx/app/opensearch/bin/opensearch-plugin list" + register: list_plugins + tags: + - install + - install:base + +- name: Show all the installed OpenSearch plugins + debug: + msg: "{{ list_plugins.stdout }}" + tags: + - install + - install:base diff --git 
a/playbooks/roles/opensearch/templates/edx/etc/opensearch/jvm.options.j2 b/playbooks/roles/opensearch/templates/edx/etc/opensearch/jvm.options.j2 new file mode 100644 index 00000000000..130eee4499c --- /dev/null +++ b/playbooks/roles/opensearch/templates/edx/etc/opensearch/jvm.options.j2 @@ -0,0 +1,77 @@ +# {{ ansible_managed }} + +## JVM configuration + +################################################################ +## IMPORTANT: JVM heap size +################################################################ +## +## You should always set the min and max JVM heap +## size to the same value. For example, to set +## the heap to 4 GB, set: +## +## -Xms4g +## -Xmx4g +## +## +################################################################ + +# Xms represents the initial size of total heap space +# Xmx represents the maximum size of total heap space + +-Xms{{ OPENSEARCH_HEAP_SIZE }} +-Xmx{{ OPENSEARCH_HEAP_SIZE }} + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC configuration +8-13:-XX:+UseConcMarkSweepGC +8-13:-XX:CMSInitiatingOccupancyFraction=75 +8-13:-XX:+UseCMSInitiatingOccupancyOnly + +## G1GC Configuration +# NOTE: G1 GC is only supported on JDK version 10 or later +# to use G1GC, uncomment the next two lines and update the version on the +# following three lines to your version of the JDK +# 10-13:-XX:-UseConcMarkSweepGC +# 10-13:-XX:-UseCMSInitiatingOccupancyOnly +14-:-XX:+UseG1GC +14-:-XX:G1ReservePercent=25 +14-:-XX:InitiatingHeapOccupancyPercent=30 + +## JVM temporary directory +-Djava.io.tmpdir=${OPENSEARCH_TMPDIR} + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps; ensure the directory exists and +# has sufficient space +-XX:HeapDumpPath={{ opensearch_data_dir }} + +# specify an alternative path for JVM fatal error logs +-XX:ErrorFile={{ opensearch_log_dir }}/hs_err_pid%p.log + +## JDK 8 GC logging +8:-XX:+PrintGCDetails +8:-XX:+PrintGCDateStamps +8:-XX:+PrintTenuringDistribution +8:-XX:+PrintGCApplicationStoppedTime +8:-Xloggc:{{ opensearch_log_dir }}/gc.log +8:-XX:+UseGCLogFileRotation +8:-XX:NumberOfGCLogFiles=32 +8:-XX:GCLogFileSize=64m + +# JDK 9+ GC logging +9-:-Xlog:gc*,gc+age=trace,safepoint:file={{ opensearch_log_dir }}/gc.log:utctime,pid,tags:filecount=32,filesize=64m diff --git a/playbooks/roles/opensearch/templates/edx/etc/opensearch/opensearch.yml.j2 b/playbooks/roles/opensearch/templates/edx/etc/opensearch/opensearch.yml.j2 new file mode 100644 index 00000000000..a1795e8dd52 --- /dev/null +++ b/playbooks/roles/opensearch/templates/edx/etc/opensearch/opensearch.yml.j2 @@ -0,0 +1,97 @@ +# {{ ansible_managed }} + +# ======================== Opensearch Configuration ========================= +# +# NOTE: Opensearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what you are trying to accomplish and the consequences. +# +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. 
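For concreteness, the heap stanza in the jvm.options template above drives both flags from the single OPENSEARCH_HEAP_SIZE variable, so with the role default of "512m" the generated file contains the matched pair the IMPORTANT note asks for (values here are just the defaults from this role's defaults/main.yml):

# rendered from jvm.options.j2 with OPENSEARCH_HEAP_SIZE: "512m"
-Xms512m
-Xmx512m

Rendering both values from one variable makes it impossible to ship a mismatched min/max heap, which is exactly the failure mode that header comment warns about.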
+# +# ---------------------------------- Cluster ----------------------------------- +# +# Use a descriptive name for your cluster: +# +#cluster.name: my-application +# +# ------------------------------------ Node ------------------------------------ +# +# Use a descriptive name for the node: +# +#node.name: node-1 +# +# Add custom attributes to the node: +# +#node.attr.rack: r1 +# +# ----------------------------------- Paths ------------------------------------ +# +# Path to directory where to store the data (separate multiple locations by comma): +# +path.data: {{ opensearch_data_dir }} +# +# Path to log files: +# +path.logs: {{ opensearch_log_dir }} +# +# ----------------------------------- Memory ----------------------------------- +# +# Lock the memory on startup: +# +bootstrap.memory_lock: true +# +# Make sure that the heap size is set to about half the memory available +# on the system and that the owner of the process is allowed to use this +# limit. +# +# Opensearch performs poorly when the system is swapping the memory. +# +# ---------------------------------- Network ----------------------------------- +# +# Set the bind address to a specific IP (IPv4 or IPv6): +# +#network.host: 192.168.0.1 +# +# Set a custom port for HTTP: +# +http.port: 9202 +# +{% if vagrant_cluster|bool %} +network.host: {{ ansible_ssh_host }} +{% endif %} +# For more information, consult the network module documentation. +# +# --------------------------------- Discovery ---------------------------------- +# +# Pass an initial list of hosts to perform discovery when this node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] +# +#discovery.seed_hosts: ["host1", "host2"] +{% if OPENSEARCH_CLUSTER_MEMBERS|length > 1 -%} +discovery.seed_hosts: ['{{OPENSEARCH_CLUSTER_MEMBERS|join("\',\'") }}'] +{% endif -%} +# +discovery.type: single-node +# Bootstrap the cluster using an initial set of master-eligible nodes: +# +#cluster.initial_master_nodes: ["node-1", "node-2"] +# +# For more information, consult the discovery and cluster formation module documentation. +# +# ---------------------------------- Gateway ----------------------------------- +# +# Block initial recovery after a full cluster restart until N nodes are started: +# +#gateway.recover_after_nodes: 3 +# +# For more information, consult the gateway module documentation. 
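The discovery block above only emits seed hosts when more than one cluster member is configured. As a worked example with hypothetical hostnames, OPENSEARCH_CLUSTER_MEMBERS: ["search1", "search2"] renders:

discovery.seed_hosts: ['search1','search2']

With the role's default empty list (or a single-member list), the seed_hosts line is omitted entirely and the unconditional discovery.type: single-node setting governs. Note that the template writes discovery.type: single-node regardless of the member count, so a genuinely multi-node deployment would presumably also need to override that line.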
+# +# ---------------------------------- Various ----------------------------------- +# +# Require explicit names when deleting indices: +# +#action.destructive_requires_name: true +# +# ---------------------------------- Plugins ----------------------------------- +plugins.security.disabled: true \ No newline at end of file diff --git a/playbooks/roles/opensearch/templates/lib/systemd/system/opensearch.service.j2 b/playbooks/roles/opensearch/templates/lib/systemd/system/opensearch.service.j2 new file mode 100644 index 00000000000..b5d2b030fc7 --- /dev/null +++ b/playbooks/roles/opensearch/templates/lib/systemd/system/opensearch.service.j2 @@ -0,0 +1,51 @@ +[Unit] +Description=opensearch +Wants=network-online.target +After=network-online.target + +[Service] +RuntimeDirectory=opensearch +PrivateTmp=true + +WorkingDirectory={{ opensearch_app_dir }} + +User={{ opensearch_user }} +Group={{ opensearch_group }} + +ExecStart={{ opensearch_app_dir }}/bin/opensearch -p {{ opensearch_app_dir }}/opensearch.pid -q + +StandardOutput=journal +StandardError=inherit + +# Specifies the maximum file descriptor number that can be opened by this process +LimitNOFILE=65536 + +# Specifies the memory lock settings +LimitMEMLOCK=infinity + +# Specifies the maximum number of processes +LimitNPROC=4096 + +# Specifies the maximum size of virtual memory +LimitAS=infinity + +# Specifies the maximum file size +LimitFSIZE=infinity + +# Disable timeout logic and wait until process is stopped +TimeoutStopSec=0 + +# SIGTERM signal is used to stop the Java process +KillSignal=SIGTERM + +# Send the signal only to the JVM rather than its control group +KillMode=process + +# Java process is never killed +SendSIGKILL=no + +# When a JVM receives a SIGTERM signal it exits with code 143 +SuccessExitStatus=143 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/playbooks/roles/openstack/defaults/main.yml b/playbooks/roles/openstack/defaults/main.yml new file mode 100644 index 00000000000..0b192e217a1 --- /dev/null +++ b/playbooks/roles/openstack/defaults/main.yml @@ -0,0 +1,41 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role openstack +# + +# Both of these vars are required to work-around +# some ansible variable precedence issues with +# circular dependencies introduced in the openstack PR. +# More investigation is required to determine the optimal +# solution. 
+vhost_name: openstack +VHOST_NAME: "{{ vhost_name }}" + +# Credentials for log sync script +SWIFT_LOG_SYNC_USERNAME: '' +SWIFT_LOG_SYNC_PASSWORD: '' +SWIFT_LOG_SYNC_TENANT_ID: '' +SWIFT_LOG_SYNC_TENANT_NAME: '' +SWIFT_LOG_SYNC_AUTH_URL: '' +SWIFT_LOG_SYNC_REGION_NAME: '' + +openstack_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/openstack.txt" + +openstack_log_sync_script: "{{ vhost_dirs.home.path }}/send-logs-to-swift" +openstack_log_sync_script_environment: "{{ vhost_dirs.home.path }}/log-sync-env.sh" +openstack_swift_logfile: "{{ vhost_dirs.logs.path }}/log-sync.log" + +openstack_debian_pkgs: + - python-setuptools + +openstack_pip_pkgs: + - python-keystoneclient + - python-swiftclient diff --git a/playbooks/roles/openstack/meta/main.yml b/playbooks/roles/openstack/meta/main.yml new file mode 100644 index 00000000000..35482345055 --- /dev/null +++ b/playbooks/roles/openstack/meta/main.yml @@ -0,0 +1,15 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role openstack +# +dependencies: + - role: vhost + VHOST_NAME: "{{ vhost_name }}" diff --git a/playbooks/roles/openstack/tasks/main.yml b/playbooks/roles/openstack/tasks/main.yml new file mode 100644 index 00000000000..f0c5b878120 --- /dev/null +++ b/playbooks/roles/openstack/tasks/main.yml @@ -0,0 +1,67 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role openstack +# + +- name: Install system packages + apt: + pkg: "{{','.join(openstack_debian_pkgs)}}" + state: present + update_cache: yes + +- name: Install openstack python packages + pip: + name: "{{ item }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + with_items: "{{ openstack_pip_pkgs }}" + +- name: Create log sync script + template: + src: send-logs-to-swift.j2 + dest: "{{ openstack_log_sync_script }}" + mode: 0755 + owner: root + group: root + when: COMMON_OBJECT_STORE_LOG_SYNC + +- name: Upload openstack credentials for log script + template: + src: log-sync-env.sh.j2 + dest: "{{ openstack_log_sync_script_environment }}" + mode: 0600 + owner: root + group: root + when: COMMON_OBJECT_STORE_LOG_SYNC + +- name: Create symlink for log sync script + file: + state: link + src: "{{ openstack_log_sync_script }}" + dest: "{{ COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT }}" + when: COMMON_OBJECT_STORE_LOG_SYNC + +# Install openstack python requirements into {{ edxapp_venv_dir }} +- name : Install python requirements + # Need to use command rather than pip so that we can maintain the context of our current working directory; + # some requirements are pathed relative to the edx-platform repo. + # Using the pip from inside the virtual environment implicitly installs everything into that virtual environment. 
+ command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }}" + args: + chdir: "{{ edxapp_code_dir }}" + become: true + become_user: "{{ edxapp_user }}" + environment: "{{ edxapp_environment }}" + when: edxapp_code_dir is defined + tags: + - install + - install:app-requirements diff --git a/playbooks/roles/openstack/templates/log-sync-env.sh.j2 b/playbooks/roles/openstack/templates/log-sync-env.sh.j2 new file mode 100644 index 00000000000..fcc69567921 --- /dev/null +++ b/playbooks/roles/openstack/templates/log-sync-env.sh.j2 @@ -0,0 +1,6 @@ +export OS_USERNAME='{{ SWIFT_LOG_SYNC_USERNAME }}' +export OS_PASSWORD='{{ SWIFT_LOG_SYNC_PASSWORD }}' +export OS_TENANT_ID='{{ SWIFT_LOG_SYNC_TENANT_ID }}' +export OS_TENANT_NAME='{{ SWIFT_LOG_SYNC_TENANT_NAME }}' +export OS_AUTH_URL='{{ SWIFT_LOG_SYNC_AUTH_URL }}' +export OS_REGION_NAME='{{ SWIFT_LOG_SYNC_REGION_NAME }}' diff --git a/playbooks/roles/openstack/templates/send-logs-to-swift.j2 b/playbooks/roles/openstack/templates/send-logs-to-swift.j2 new file mode 100644 index 00000000000..8c24cf43459 --- /dev/null +++ b/playbooks/roles/openstack/templates/send-logs-to-swift.j2 @@ -0,0 +1,81 @@ +#!/bin/bash +# +# This script can be called from logrotate to sync logs to swift. Based on +# the existing S3 implementation +# + +if (( $EUID != 0 )); then + echo "Please run as the root user" + exit 1 +fi + +# Ensure the log processors can read without running as root +if [ ! -f "{{ openstack_swift_logfile }}" ]; then + touch "{{ openstack_swift_logfile }}" +fi +chown syslog:syslog "{{ openstack_swift_logfile }}" + +exec > >(tee -a "{{ openstack_swift_logfile }}") +exec 2>&1 + +usage() { + + cat< - name=ora - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - when: ora_installed is defined and not devstack - -- name: restart ora_celery - supervisorctl_local: > - name=ora_celery - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - when: ora_installed is defined and not devstack diff --git a/playbooks/roles/ora/meta/main.yml b/playbooks/roles/ora/meta/main.yml deleted file mode 100644 index 107f1e98c29..00000000000 --- a/playbooks/roles/ora/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - supervisor diff --git a/playbooks/roles/ora/tasks/deploy.yml b/playbooks/roles/ora/tasks/deploy.yml deleted file mode 100644 index 55c946061d9..00000000000 --- a/playbooks/roles/ora/tasks/deploy.yml +++ /dev/null @@ -1,117 +0,0 @@ -- name: create supervisor scripts - ora, ora_celery - template: > - src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf - owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 - notify: - - restart ora - - restart ora_celery - with_items: ['ora', 'ora_celery'] - when: not devstack - -- include: ease.yml - -- name: create ora application config - template: src=ora.env.json.j2 dest={{ora_app_dir}}/ora.env.json - sudo_user: "{{ ora_user }}" - -- name: create ora auth file - template: src=ora.auth.json.j2 dest={{ora_app_dir}}/ora.auth.json - sudo_user: "{{ ora_user }}" - -- name: setup the ora env - notify: - - "restart ora" - - "restart ora_celery" - template: > - src=ora_env.j2 dest={{ ora_app_dir }}/ora_env - owner={{ ora_user }} group={{ common_web_user }} - mode=0644 - -# Do A Checkout -- name: git checkout ora repo into {{ ora_app_dir }} - git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }} - 
sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -# TODO: Check git.py _run_if_changed() to see if the logic there to skip running certain -# portions of the deploy needs to be incorporated here. - - -# Install the python pre requirements into {{ ora_venv_dir }} -- name: install python pre-requirements - pip: requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -# Install the python post requirements into {{ ora_venv_dir }} -- name: install python post-requirements - pip: requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - - #Needed if using redis to prevent memory issues -- name: change memory commit settings -- needed for redis - command: sysctl vm.overcommit_memory=1 - notify: - - restart ora - - restart ora_celery - -- name: syncdb and migrate - shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ora_code_dir}} - when: migrate_db is defined and migrate_db|lower == "yes" - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -- name: create users - shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ora_code_dir}} - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - - - # call supervisorctl update. this reloads - # the supervisorctl config and restarts - # the services if any of the configurations - # have changed. - # -- name: update supervisor configuration - shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" - register: supervisor_update - when: not devstack - changed_when: supervisor_update.stdout != "" - -- name: ensure ora is started - supervisorctl_local: > - name=ora - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - when: not devstack - -- name: ensure ora_celery is started - supervisorctl_local: > - name=ora_celery - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - when: not devstack - -- name: create a symlink for venv python - file: > - src="http://github.com/%7B%7B%20ora_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.ora - state=link - with_items: - - python - - pip - -- set_fact: ora_installed=true diff --git a/playbooks/roles/ora/tasks/ease.yml b/playbooks/roles/ora/tasks/ease.yml deleted file mode 100644 index ed9ba1d4238..00000000000 --- a/playbooks/roles/ora/tasks/ease.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Do A Checkout -- name: git checkout ease repo into its base dir - git: dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}} - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -- name: install ease system packages - apt: pkg={{item}} state=present - with_items: ora_ease_debian_pkgs - notify: - - restart ora - - restart ora_celery - - -# Install the python pre requirements into {{ ora_ease_venv_dir }} -- name: install ease python pre-requirements - pip: requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -# Install the python post requirements into {{ ora_ease_venv_dir }} -- name: install ease 
python post-requirements - pip: requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -- name: install ease python package - shell: > - . {{ ora_ease_venv_dir }}/bin/activate; cd {{ ora_ease_code_dir }}; python setup.py install - sudo_user: "{{ ora_user }}" - notify: - - restart ora - - restart ora_celery - -- name: download and install nltk - shell: | - set -e - curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }} - tar zxf {{ ora_nltk_tmp_file }} - rm -f {{ ora_nltk_tmp_file }} - touch {{ ora_nltk_download_url|basename }}-installed - creates={{ ora_data_dir }}/{{ ora_nltk_download_url|basename }}-installed - chdir={{ ora_data_dir }} - sudo_user: "{{ common_web_user }}" - notify: - - restart ora - - restart ora_celery diff --git a/playbooks/roles/ora/tasks/main.yml b/playbooks/roles/ora/tasks/main.yml deleted file mode 100644 index 7c3bfa85934..00000000000 --- a/playbooks/roles/ora/tasks/main.yml +++ /dev/null @@ -1,53 +0,0 @@ -# requires: -# - group_vars/all -# - common/tasks/main.yml ---- - -- name: create application user - user: > - name="{{ ora_user }}" home="{{ ora_app_dir }}" - createhome=no shell=/bin/false - notify: - - restart ora - - restart ora_celery - -- name: create ora app dir - file: > - path="{{ item }}" state=directory - owner="{{ ora_user }}" group="{{ common_web_group }}" - notify: - - restart ora - - restart ora_celery - with_items: - - "{{ ora_venvs_dir }}" - - "{{ ora_app_dir }}" - -- name: create ora data dir, owned by {{ common_web_user }} - file: > - path="{{ item }}" state=directory - owner="{{ common_web_user }}" group="{{ common_web_group }}" - notify: - - restart ora - - restart ora_celery - with_items: - - "{{ ora_data_dir }}" - - "{{ ora_data_course_dir }}" - - "{{ ora_app_dir }}/ml_models" - -- name: install debian packages that ora needs - apt: pkg={{item}} state=present - notify: - - restart ora - - restart ora_celery - with_items: ora_debian_pkgs - -- name: install debian packages for ease that ora needs - apt: pkg={{item}} state=present - notify: - - restart ora - - restart ora_celery - with_items: ora_ease_debian_pkgs - -- include: deploy.yml tags=deploy - - diff --git a/playbooks/roles/ora/templates/ora.auth.json.j2 b/playbooks/roles/ora/templates/ora.auth.json.j2 deleted file mode 100644 index 7acdca0c18a..00000000000 --- a/playbooks/roles/ora/templates/ora.auth.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ ora_auth_config | to_nice_json }} diff --git a/playbooks/roles/ora/templates/ora.conf.j2 b/playbooks/roles/ora/templates/ora.conf.j2 deleted file mode 100644 index ffc84f743b2..00000000000 --- a/playbooks/roles/ora/templates/ora.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:ora] - -command={{ ora_venv_bin }}/gunicorn --preload -b {{ ora_gunicorn_host }}:{{ ora_gunicorn_port }} -w {{ ora_gunicorn_workers }} --timeout=90 --pythonpath={{ ora_code_dir}} edx_ora.wsgi - -user={{ common_web_user }} -directory={{ ora_code_dir }} - -environment=PID=/var/run/gunicorn/edx-ora.pid,WORKERS={{ ora_gunicorn_workers }},PORT={{ ora_gunicorn_port }},ADDRESS={{ ora_gunicorn_host }},LANG={{ ORA_LANG }},DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }} - -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true - diff --git a/playbooks/roles/ora/templates/ora.env.json.j2 
b/playbooks/roles/ora/templates/ora.env.json.j2 deleted file mode 100644 index c72c8c169c6..00000000000 --- a/playbooks/roles/ora/templates/ora.env.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ ora_env_config | to_nice_json }} diff --git a/playbooks/roles/ora/templates/ora_celery.conf.j2 b/playbooks/roles/ora/templates/ora_celery.conf.j2 deleted file mode 100644 index d86d580b747..00000000000 --- a/playbooks/roles/ora/templates/ora_celery.conf.j2 +++ /dev/null @@ -1,14 +0,0 @@ -[program:ora_celery] - -command={{ ora_venv_bin }}/python {{ ora_code_dir }}/manage.py celeryd --loglevel=info --settings=edx_ora.aws --pythonpath={{ ora_code_dir }} -B --autoscale=4,1 --schedule={{ ora_data_dir }}/celerybeat-schedule - -user={{ common_web_user }} -directory={{ ora_code_dir }} - -environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }} - -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true - diff --git a/playbooks/roles/ora/templates/ora_env.j2 b/playbooks/roles/ora/templates/ora_env.j2 deleted file mode 100644 index 45e34fcbc10..00000000000 --- a/playbooks/roles/ora/templates/ora_env.j2 +++ /dev/null @@ -1,7 +0,0 @@ -# {{ ansible_managed }} -{% for name,value in ora_environment.items() %} -{%- if value %} -export {{ name }}="{{ value }}" - -{% endif %} -{% endfor %} diff --git a/playbooks/roles/oraclejdk/defaults/main.yml b/playbooks/roles/oraclejdk/defaults/main.yml index b99d6eaeda0..77b1e0653e8 100644 --- a/playbooks/roles/oraclejdk/defaults/main.yml +++ b/playbooks/roles/oraclejdk/defaults/main.yml @@ -1,11 +1,16 @@ --- -oraclejdk_version: "7u25" +ORACLEJDK_VERSION: "8u131" # what the archive unpacks to -oraclejdk_base: "jdk1.7.0_25" -oraclejdk_build: "b15" +oraclejdk_base: "jdk1.8.0_131" +oraclejdk_build: "b11" oraclejdk_platform: "linux" oraclejdk_arch: "x64" -oraclejdk_file: "jdk-{{ oraclejdk_version }}-{{ oraclejdk_platform }}-{{ oraclejdk_arch }}.tar.gz" -oraclejdk_url: "http://download.oracle.com/otn-pub/java/jdk/%7B%7B%20oraclejdk_version%20%7D%7D-%7B%7B%20oraclejdk_build%20%7D%7D/%7B%7B%20oraclejdk_file%20%7D%7D" -oraclejdk_link: "/usr/lib/jvm/java-7-oracle" +oraclejdk_file: "jdk-{{ ORACLEJDK_VERSION }}-{{ oraclejdk_platform }}-{{ oraclejdk_arch }}.tar.gz" + +oraclejdk_url: "http://download.oracle.com/otn-pub/java/jdk/%7B%7B%20ORACLEJDK_VERSION%20%7D%7D-%7B%7B%20oraclejdk_build%20%7D%7D/d54c1d3a095b4ff2b6607d096fa80163/%7B%7B%20oraclejdk_file%20%7D%7D" + +oraclejdk_link: "/usr/lib/jvm/java-8-oracle" + +oraclejdk_debian_pkgs: + - curl diff --git a/playbooks/roles/oraclejdk/tasks/main.yml b/playbooks/roles/oraclejdk/tasks/main.yml index 3add5e51bbf..0749a483d87 100644 --- a/playbooks/roles/oraclejdk/tasks/main.yml +++ b/playbooks/roles/oraclejdk/tasks/main.yml @@ -1,5 +1,4 @@ --- - # oraclejdk # # Dependencies: @@ -12,44 +11,57 @@ # - common # - oraclejdk -- name: check for Oracle Java version {{ oraclejdk_base }} - command: test -d /usr/lib/jvm/{{ oraclejdk_base }} - ignore_errors: true - register: oraclejdk_present - -- name: download Oracle Java - shell: > - curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }} - executable=/bin/bash - chdir=/var/tmp - creates=/var/tmp/{{ oraclejdk_file }} - when: oraclejdk_present|failed +- name: Install debian needed pkgs + apt: + name: "{{ item }}" + with_items: "{{ oraclejdk_debian_pkgs }}" -- name: install Oracle Java - shell: > - mkdir -p /usr/lib/jvm && tar 
-C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }} - creates=/usr/lib/jvm/{{ oraclejdk_base }} - executable=/bin/bash +- name: Download Oracle Java + get_url: + url: "{{ oraclejdk_url }}" + headers: + Cookie: oraclelicense=accept-securebackup-cookie + dest: "/var/tmp/{{ oraclejdk_file }}" + retries: 3 + delay: 10 + register: oracle_jdk_download_retry + until: oracle_jdk_download_retry is succeeded - sudo: true - when: oraclejdk_present|failed +- name: Create jvm dir + file: + path: /usr/lib/jvm + state: directory + owner: root + group: root -- name: create symlink expected by elasticsearch - file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link - when: oraclejdk_present|failed +- name: Untar Oracle Java + unarchive: + src: "/var/tmp/{{ oraclejdk_file }}" + dest: "/usr/lib/jvm" + copy: no -- name: update alternatives java - shell: > - update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/java" 1 +- name: Create symlink expected by elasticsearch + file: + src: "/usr/lib/jvm/{{ oraclejdk_base }}" + dest: "{{ oraclejdk_link }}" + state: link + force: yes -- name: update alternatives javac - shell: > - update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javac" 1 +- name: Update alternatives java + alternatives: + name: "{{ item }}" + link: "/usr/bin/{{ item }}" + path: "/usr/lib/jvm/{{ oraclejdk_base }}/bin/{{ item }}" + with_items: + - java + - javac + - javaws + - jarsigner -- name: update alternatives javaws - shell: > - update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javaws" 1 - -- name: add JAVA_HOME for Oracle Java - template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755 - when: oraclejdk_present|failed +- name: Add JAVA_HOME for Oracle Java + template: + src: "java.sh.j2" + dest: "/etc/profile.d/java.sh" + owner: root + group: root + mode: "0755" diff --git a/playbooks/roles/oraclejdk/templates/java.sh.j2 b/playbooks/roles/oraclejdk/templates/java.sh.j2 index 0562b22beb7..5b67a9d45de 100644 --- a/playbooks/roles/oraclejdk/templates/java.sh.j2 +++ b/playbooks/roles/oraclejdk/templates/java.sh.j2 @@ -1,2 +1,2 @@ -export JAVA_HOME="{{oraclejdk_link}}" +export JAVA_HOME="{{ oraclejdk_link }}" export PATH=$JAVA_HOME/bin:$PATH diff --git a/playbooks/roles/payment/defaults/main.yml b/playbooks/roles/payment/defaults/main.yml new file mode 100644 index 00000000000..466d339fa0d --- /dev/null +++ b/playbooks/roles/payment/defaults/main.yml @@ -0,0 +1,6 @@ +payment_env_extra: + APPLE_PAY_COUNTRY_CODE: "{{ PAYMENT_APPLE_PAY_COUNTRY_CODE }}" + STRIPE_PUBLISHABLE_KEY: "{{ PAYMENT_STRIPE_PUBLISHABLE_KEY }}" + STRIPE_RESPONSE_URL: "{{ PAYMENT_STRIPE_RESPONSE_URL }}" + WAFFLE_FLAGS: "{{ PAYMENT_WAFFLE_FLAGS|default(omit) }}" + COMMERCE_COORDINATOR_BASE_URL: "{{ PAYMENT_COMMERCE_COORDINATOR_BASE_URL }}" diff --git a/playbooks/roles/payment/meta/main.yml b/playbooks/roles/payment/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/payment/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/payment/tasks/main.yml b/playbooks/roles/payment/tasks/main.yml new file mode 100644 index 00000000000..bab877de787 --- /dev/null +++ b/playbooks/roles/payment/tasks/main.yml @@ -0,0 +1,6 @@ +- name: Build Payment MFE + include_role: + name: mfe + vars: + MFE_ENVIRONMENT_EXTRA: '{{ payment_env_extra | default(MFE_DEPLOY_ENVIRONMENT_EXTRA) }}' 
+ MFE_VERSION: "{{ PAYMENT_MFE_VERSION | default('master') }}" diff --git a/playbooks/roles/postfix_queue/defaults/main.yml b/playbooks/roles/postfix_queue/defaults/main.yml new file mode 100644 index 00000000000..6b09b01bd51 --- /dev/null +++ b/playbooks/roles/postfix_queue/defaults/main.yml @@ -0,0 +1,52 @@ +--- + +# postfix_queue: Configure a local postfix server to forward mail to an +# external SMTP server. This way postfix acts as an outgoing mail queue, and +# web apps can send mail instantly, while still taking advantage of an +# external SMTP service. +# +# The external service is assumed to use TLS. +# +# You must leave the edxapp role's EDXAPP_EMAIL_foo settings at their default +# values in order for the postfix queue to be used. + +POSTFIX_QUEUE_EXTERNAL_SMTP_HOST: '' +POSTFIX_QUEUE_EXTERNAL_SMTP_PORT: 587 +POSTFIX_QUEUE_EXTERNAL_SMTP_USER: '' +POSTFIX_QUEUE_EXTERNAL_SMTP_PASSWORD: '' + +# Set this to content of sender_canonical_maps postfix configuration file (optional). +# Example: +# POSTFIX_QUEUE_SENDER_CANONICAL_MAPS: |- +# @internal @external.com +# someuser@example.com otheruser@myschool.org +POSTFIX_QUEUE_SENDER_CANONICAL_MAPS: '' + +# Set this to content of recipient_canonical_maps postfix configuration file (optional). +# Example: +# POSTFIX_QUEUE_RECIPIENT_CANONICAL_MAPS: |- +# @internal @external.com +# someuser@example.com otheruser@myschool.org +POSTFIX_QUEUE_RECIPIENT_CANONICAL_MAPS: '' + +# Set this to content of header_checks postfix configuration file (optional). +# Example: +# POSTFIX_QUEUE_HEADER_CHECKS: |- +# /^From:(.*)$/ PREPEND Reply-To:$1 +# /^Subject:.*spam/ DISCARD +POSTFIX_QUEUE_HEADER_CHECKS: '' + +# Internal vars: + +postfix_queue_password_file: "/etc/postfix/sasl/passwd" +postfix_queue_sender_canonical_maps_file: "/etc/postfix/sender_canonical_maps" +postfix_queue_recipient_canonical_maps_file: "/etc/postfix/recipient_canonical_maps" +postfix_queue_header_checks_file: "/etc/postfix/header_checks" + +postfix_queue_smtp_sasl_auth_enable: "yes" +postfix_queue_smtp_sasl_password_maps: "hash:{{ postfix_queue_password_file }}" +postfix_queue_smtp_sasl_mechanism_filter: "" +postfix_queue_smtp_sasl_security_options: "" +postfix_queue_relayhost: "{{ POSTFIX_QUEUE_EXTERNAL_SMTP_HOST }}:{{ POSTFIX_QUEUE_EXTERNAL_SMTP_PORT }}" +postfix_queue_smtp_tls_security_level: "encrypt" +postfix_queue_smtp_tls_mandatory_ciphers: "high" diff --git a/playbooks/roles/postfix_queue/tasks/main.yml b/playbooks/roles/postfix_queue/tasks/main.yml new file mode 100644 index 00000000000..0dc3fb4a82c --- /dev/null +++ b/playbooks/roles/postfix_queue/tasks/main.yml @@ -0,0 +1,90 @@ +--- + +# postfix_queue: Configure a local postfix server to forward mail to an +# external SMTP server. This way postfix acts as an outgoing mail queue, and +# web apps can send mail instantly, while still taking advantage of an +# external SMTP service. 
+ +- name: install postfix + apt: pkg=postfix state=present + +- name: Backup original postfix main.cf + command: cp /etc/postfix/main.cf /etc/postfix/main.cf.backup + args: + creates: /etc/postfix/main.cf.backup + +- name: Configure postfix + command: postconf -e '{{ item }}' + with_items: + - "smtp_sasl_auth_enable = {{ postfix_queue_smtp_sasl_auth_enable }}" + - "smtp_sasl_password_maps = {{ postfix_queue_smtp_sasl_password_maps }}" + - "smtp_sasl_mechanism_filter = {{ postfix_queue_smtp_sasl_mechanism_filter }}" + - "smtp_sasl_security_options = {{ postfix_queue_smtp_sasl_security_options }}" + - "relayhost = {{ postfix_queue_relayhost }}" + - "smtp_tls_security_level = {{ postfix_queue_smtp_tls_security_level }}" + - "smtp_tls_mandatory_ciphers = {{ postfix_queue_smtp_tls_mandatory_ciphers }}" + - "sender_canonical_maps = hash:{{ postfix_queue_sender_canonical_maps_file }}" + - "recipient_canonical_maps = hash:{{ postfix_queue_recipient_canonical_maps_file }}" + - "header_checks = regexp:{{ postfix_queue_header_checks_file }}" + +- name: Explain postfix authentication + lineinfile: + dest: "{{ postfix_queue_password_file }}" + line: "# Configured by Ansible:" + create: yes + +- name: Set permissions of password file + file: path="{{ postfix_queue_password_file }}" state=file mode="0600" owner=root group=root + +- name: Configure postfix authentication + lineinfile: + dest: "{{ postfix_queue_password_file }}" + line: "{{ postfix_queue_relayhost }} {{ POSTFIX_QUEUE_EXTERNAL_SMTP_USER }}:{{ POSTFIX_QUEUE_EXTERNAL_SMTP_PASSWORD }}" + insertafter: "# Configured by Ansible:" + register: postfix_queue_password + +- name: Hash postfix SASL password + command: "postmap hash:{{ postfix_queue_password_file }}" + when: postfix_queue_password.changed + +- name: Configure postfix sender canonical maps + copy: + dest: "{{ postfix_queue_sender_canonical_maps_file }}" + content: "# Configured by Ansible:\n{{ POSTFIX_QUEUE_SENDER_CANONICAL_MAPS }}" + force: true + owner: root + group: root + mode: "0600" + register: postfix_queue_sender_canonical_maps + +- name: Hash postfix sender canonical maps file + command: "postmap hash:{{ postfix_queue_sender_canonical_maps_file }}" + when: postfix_queue_sender_canonical_maps.changed + +- name: Configure postfix recipient canonical maps + copy: + dest: "{{ postfix_queue_recipient_canonical_maps_file }}" + content: "# Configured by Ansible:\n{{ POSTFIX_QUEUE_RECIPIENT_CANONICAL_MAPS }}" + force: true + owner: root + group: root + mode: "0600" + register: postfix_queue_recipient_canonical_maps + +- name: Hash postfix recipient canonical maps file + command: "postmap hash:{{ postfix_queue_recipient_canonical_maps_file }}" + when: postfix_queue_recipient_canonical_maps.changed + +- name: Configure postfix header checks + copy: + dest: "{{ postfix_queue_header_checks_file }}" + content: "# Configured by Ansible:\n{{ POSTFIX_QUEUE_HEADER_CHECKS }}" + force: true + owner: root + group: root + mode: "0600" + +- name: Restart Postfix + service: + name: postfix + state: restarted diff --git a/playbooks/roles/program_console/defaults/main.yml b/playbooks/roles/program_console/defaults/main.yml new file mode 100644 index 00000000000..0e88b32de50 --- /dev/null +++ b/playbooks/roles/program_console/defaults/main.yml @@ -0,0 +1,62 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: 
https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# + +PROGRAM_CONSOLE_URL_ROOT: !!null +PROGRAM_CONSOLE_LMS_BASE_URL: !!null +PROGRAM_CONSOLE_REGISTRAR_API_BASE_URL: !!null +PROGRAM_CONSOLE_DISCOVERY_BASE_URL: !!null +PROGRAM_CONSOLE_NGINX_PORT: 80 +PROGRAM_CONSOLE_SSL_NGINX_PORT: 443 + +program_console_home: '{{ COMMON_APP_DIR }}/{{ program_console_service_name }}' +NVM_DIR: '{{ program_console_home }}' +program_console_user: 'root' +program_console_git_identity: 'none' +program_console_repo: 'https://github.com/openedx/frontend-app-program-console.git' +PROGRAM_CONSOLE_VERSION: 'master' +program_console_service_name: 'program-console' +PROGRAM_CONSOLE_NODE_VERSION: '18.17.0' +program_console_nodeenv_dir: '{{ program_console_home }}/nodeenvs/{{ program_console_service_name }}' +program_console_nodeenv_bin: '{{program_console_nodeenv_dir}}/bin' +program_console_app_dir: "{{ COMMON_APP_DIR }}/program-console" +program_console_code_dir: "{{ program_console_app_dir }}/program-console" +program_console_dist_dir: "{{ program_console_code_dir }}/dist" +program_console_env_vars: + PATH: "{{ program_console_nodeenv_bin }}:{{ ansible_env.PATH }}" + NODE_ENV: "production" + ACTIVE_ENV: "production" + BASE_URL: "{{ PROGRAM_CONSOLE_URL_ROOT }}" + LMS_BASE_URL: "{{ PROGRAM_CONSOLE_LMS_BASE_URL }}" + REGISTRAR_API_BASE_URL: "{{ PROGRAM_CONSOLE_REGISTRAR_API_BASE_URL }}" + DISCOVERY_BASE_URL: "{{ PROGRAM_CONSOLE_DISCOVERY_BASE_URL }}" + LOGIN_URL: '{{ COMMON_LMS_BASE_URL }}/login' + LOGOUT_URL: '{{ COMMON_LMS_BASE_URL }}/logout' + CSRF_TOKEN_API_PATH: '/csrf/api/v1/token' + REFRESH_ACCESS_TOKEN_ENDPOINT: '{{ COMMON_LMS_BASE_URL }}/login_refresh' + ACCESS_TOKEN_COOKIE_NAME: 'edx-jwt-cookie-header-payload' + USER_INFO_COOKIE_NAME: 'edx-user-info' + MARKETING_SITE_BASE_URL: 'https://stage.edx.org/' + SUPPORT_URL: 'https://stage.edx.org/support' + CONTACT_URL: 'https://stage.edx.org/contact' + OPEN_SOURCE_URL: 'https://stage.edx.org/openedx' + TERMS_OF_SERVICE_URL: 'https://stage.edx.org/terms-of-service' + PRIVACY_POLICY_URL: 'https://stage.edx.org/privacy-policy' + FACEBOOK_URL: 'https://www.facebook.com/' + TWITTER_URL: 'https://twitter.com/' + YOU_TUBE_URL: 'https://www.youtube.com/' + LINKED_IN_URL: 'https://www.linkedin.com/' + GOOGLE_PLUS_URL: 'https://plus.google.com/' + REDDIT_URL: 'https://www.reddit.com/' + APPLE_APP_STORE_URL: 'https://www.apple.com/ios/app-store/' + GOOGLE_PLAY_URL: 'https://play.google.com/store' + SITE_NAME: '' + SEGMENT_KEY: '' diff --git a/playbooks/roles/program_console/meta/main.yml b/playbooks/roles/program_console/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/program_console/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/program_console/tasks/main.yml b/playbooks/roles/program_console/tasks/main.yml new file mode 100644 index 00000000000..20c3de0bed0 --- /dev/null +++ b/playbooks/roles/program_console/tasks/main.yml @@ -0,0 +1,89 @@ +- name: Remove old git repo + file: + state: absent + path: "{{ program_console_code_dir }}/" + +- name: Remove old app repo + file: + state: absent + path: "{{ program_console_app_dir }}" + +- name: Create program-console app folder + file: + path: "{{ program_console_app_dir }}" + state: directory + owner: "{{ program_console_user }}" + group: "{{ program_console_user }}" + +- 
name: Checkout program-console repo into {{ program_console_code_dir }}
+  git:
+    dest: "{{ program_console_code_dir }}"
+    repo: "{{ program_console_repo }}"
+    version: "{{ PROGRAM_CONSOLE_VERSION }}"
+    accept_hostkey: yes
+  become_user: "{{ program_console_user }}"
+  register: program_console_checkout
+
+
+# Use apt to install nodeenv, so we can use nodeenv to install nodejs
+- name: Install nodeenv using apt
+  apt:
+    name: nodeenv
+  tags:
+    - install
+    - install:system-requirements
+
+# Install node
+- name: install nodejs
+  shell: "nodeenv {{ program_console_nodeenv_dir }} --node={{ PROGRAM_CONSOLE_NODE_VERSION }} --prebuilt --force"
+  become_user: "{{ program_console_user }}"
+  environment: "{{ program_console_env_vars }}"
+  tags:
+    - install
+    - install:system-requirements
+
+# Set the npm registry
+# This needs to be done as root since npm is weird about
+# chown - https://github.com/npm/npm/issues/3565
+- name: Set the npm registry
+  shell: "{{ program_console_nodeenv_bin }}/npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'"
+  args:
+    creates: "{{ program_console_code_dir }}/.npmrc"
+  environment: "{{ program_console_env_vars }}"
+  become_user: "{{ program_console_user }}"
+  tags:
+    - install
+    - install:app-requirements
+
+# We need to do this so that npm can find a node install to use to build node-sass
+- name: prepend node path
+  shell: "{{ program_console_nodeenv_bin }}/npm config set scripts-prepend-node-path true"
+  environment: "{{ program_console_env_vars }}"
+  become_user: "{{ program_console_user }}"
+  tags:
+    - install
+    - install:app-requirements
+
+# Install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json
+# The version of ansible we are using also does not make use of "--unsafe-perm", which we need for node-sass
+- name: install node dependencies
+  shell: "sudo {{ program_console_nodeenv_bin }}/node {{ program_console_nodeenv_bin }}/npm i --unsafe-perm"
+  args:
+    chdir: "{{ program_console_code_dir }}"
+  environment: "{{ program_console_env_vars }}"
+  become: true
+  become_method: sudo
+  tags:
+    - install
+    - install:app-requirements
+
+# Install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json
+- name: run program-console build
+  shell: "npm run build"
+  args:
+    chdir: "{{ program_console_code_dir }}"
+  environment: "{{ program_console_env_vars }}"
+  become_user: "{{ program_console_user }}"
+  tags:
+    - install
+    - install:app-requirements
diff --git a/playbooks/roles/prospectus/defaults/main.yml b/playbooks/roles/prospectus/defaults/main.yml
new file mode 100644
index 00000000000..9577543f0e5
--- /dev/null
+++ b/playbooks/roles/prospectus/defaults/main.yml
@@ -0,0 +1,76 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Defaults for role prospectus
+#
+
+# .env vars
+PROSPECTUS_ENVIRONMENT: !!null
+PROSPECTUS_OAUTH_ID: !!null
+PROSPECTUS_OAUTH_SECRET: !!null
+
+PROSPECTUS_NEW_RELIC_LICENSE_KEY: 'fake-key'
+PROSPECTUS_NEW_RELIC_APP_ID: 'fake-id'
+
+PROSPECTUS_CONTENTFUL_SPACE_ID: 'fake-key'
+PROSPECTUS_CONTENTFUL_ACCESS_TOKEN: 'fake-key'
+PROSPECTUS_CONTENTFUL_ENVIRONMENT: 'master'
+PROSPECTUS_SEGMENT_WRITE_KEY: 'fake-key'
+PROSPECTUS_ALGOLIA_BROWSE_KEY: 'fake-key'
+PROSPECTUS_ALGOLIA_ADMIN_KEY: 'fake-key' + +# nginx vars +PROSPECTUS_DATA_DIR: '/edx/var/prospectus' +NGINX_PROSPECTUS_PROXY_INTERCEPT_ERRORS: true +NGINX_PROSPECTUS_DISABLE_INDEXING: false +PROSPECTUS_STATIC_SITES: [] +PROSPECTUS_TEMPORARY_REDIRECTS: [] + +# task vars +PROSPECTUS_GIT_IDENTITY: "none" +prospectus_repo: 'ssh://git@github.com/edx/prospectus.git' +PROSPECTUS_VERSION: 'master' +edx_django_service_use_python3: false +PROSPECTUS_NODE_VERSION: '18.13.0' +prospectus_service_name: 'prospectus' +prospectus_home: '{{ COMMON_APP_DIR }}/{{ prospectus_service_name }}' +prospectus_venv_dir: '{{ prospectus_home }}/venvs/{{ prospectus_service_name }}' +prospectus_nodeenv_dir: '{{ prospectus_home }}/nodeenvs/{{ prospectus_service_name }}' +prospectus_nodeenv_bin: '{{prospectus_nodeenv_dir}}/bin' +prospectus_app_dir: "{{ COMMON_APP_DIR }}/prospectus" +prospectus_user: 'root' +prospectus_env_vars: + PATH: "{{ prospectus_nodeenv_bin }}:{{ prospectus_venv_dir }}/bin:{{ ansible_env.PATH }}" + NODE_ENV: "{{ PROSPECTUS_ENVIRONMENT }}" + ACTIVE_ENV: "{{ PROSPECTUS_ENVIRONMENT }}" + GATSBY_CONTENTFUL_SPACE_ID: "{{ PROSPECTUS_CONTENTFUL_SPACE_ID }}" + GATSBY_CONTENTFUL_ACCESS_TOKEN: "{{ PROSPECTUS_CONTENTFUL_ACCESS_TOKEN }}" + GATSBY_CSRF_TOKEN_API_PATH: '/csrf/api/v1/token' + GATSBY_ACCESS_TOKEN_COOKIE_NAME: 'edx-jwt-cookie-header-payload' + CONTENTFUL_ENVIRONMENT: "{{ PROSPECTUS_CONTENTFUL_ENVIRONMENT }}" +prospectus_git_identity: "{{ prospectus_app_dir }}/prospectus-git-identity" +prospectus_code_dir: "{{ prospectus_app_dir }}/prospectus" +prospectus_ssl_nginx_port: 443 +prospectus_use_python3: true + +PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS: False +PROSPECTUS_ENABLE_NGINX: True +PROSPECTUS_ENABLE_BUILD: True + +PROSPECTUS_ENABLE_PRE_BUILD: True +PROSPECTUS_ENABLE_NPM_BUILD: True +PROSPECTUS_ENABLE_POST_BUILD: True + +PROSPECTUS_S3_HOSTING_PROXY_ENABLED: False +PROSPECTUS_S3_HOSTING_PROXY_CACHE_ENABLED: False +PROSPECTUS_S3_UPLOAD_ENABLED: False +PROSPECTUS_S3_UPLOAD_MAX_CONCURENCT_REQUESTS: 50 +PROSPECTUS_S3_HOSTING_BUCKET: !!null +PROSPECTUS_S3_HOSTING_BUCKET_URL: !!null diff --git a/playbooks/roles/prospectus/meta/main.yml b/playbooks/roles/prospectus/meta/main.yml new file mode 100644 index 00000000000..43bbc4a1460 --- /dev/null +++ b/playbooks/roles/prospectus/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: common + - role: nginx + when: PROSPECTUS_ENABLE_NGINX|bool diff --git a/playbooks/roles/prospectus/tasks/main.yml b/playbooks/roles/prospectus/tasks/main.yml new file mode 100644 index 00000000000..480f0b8a25b --- /dev/null +++ b/playbooks/roles/prospectus/tasks/main.yml @@ -0,0 +1,267 @@ +- name: Setup Prospectus Build Environment + when: PROSPECTUS_ENABLE_PRE_BUILD|bool + block: + # Remove all of the old code + - name: Remove read-only ssh key for the prospectus repo + file: + path: "{{ prospectus_git_identity }}" + state: absent + when: PROSPECTUS_GIT_IDENTITY != "none" + + - name: check if cache dir exists + stat: + path: "{{ prospectus_code_dir }}/.cache" + register: register_cache_dir + + - name: check if data dir exists + stat: + path: "{{ PROSPECTUS_DATA_DIR }}" + register: register_data_dir + + - name: create prospectus temp cache folder + file: + path: "/tmp/cache-data" + state: directory + when: PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS|bool and register_cache_dir.stat.exists + + - name: move cache dir to temp + command: mv {{ prospectus_code_dir }}/.cache /tmp/cache-data/ + when: PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS|bool and register_cache_dir.stat.exists + + - name: Remove 
old git repo + file: + state: absent + path: "{{ prospectus_code_dir }}/" + + - name: Remove old app repo + file: + state: absent + path: "{{ prospectus_app_dir }}" + + - name: Remove data directory + file: + state: absent + path: "{{ PROSPECTUS_DATA_DIR }}" + when: not PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS|bool + + - name: Create prospectus app folder + file: + path: "{{ prospectus_app_dir }}" + state: directory + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + + # This key is only needed if you are pulling down a private + # prospectus repo + - name: Install read-only ssh key for the prospectus repo + copy: + content: "{{ PROSPECTUS_GIT_IDENTITY }}" + dest: "{{ prospectus_git_identity }}" + force: yes + owner: "{{ prospectus_user }}" + mode: "0600" + when: PROSPECTUS_GIT_IDENTITY != "none" + + - name: "Checkout prospectus repo into {{ prospectus_code_dir }} with key" + git: + dest: "{{ prospectus_code_dir }}" + repo: "{{ prospectus_repo }}" + version: "{{ PROSPECTUS_VERSION }}" + accept_hostkey: yes + key_file: "{{ prospectus_git_identity }}" + become_user: "{{ prospectus_user }}" + register: prospectus_checkout_with_key + when: PROSPECTUS_GIT_IDENTITY != "none" + + - name: Checkout prospectus repo into {{ prospectus_code_dir }} without key + git: + dest: "{{ prospectus_code_dir }}" + repo: "{{ prospectus_repo }}" + version: "{{ PROSPECTUS_VERSION }}" + accept_hostkey: yes + become_user: "{{ prospectus_user }}" + register: prospectus_checkout_without_key + when: PROSPECTUS_GIT_IDENTITY == "none" + + - name: move cache dir to {{ prospectus_code_dir }} + command: mv /tmp/cache-data/.cache "{{ prospectus_code_dir }}/" + when: PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS|bool and register_cache_dir.stat.exists + + - name: create prospectus public folder + file: + path: "{{ prospectus_code_dir }}/public" + state: directory + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + when: PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS|bool and register_data_dir.stat.exists + + - name: move data dir to {{ prospectus_code_dir }}/public + shell: "mv {{ PROSPECTUS_DATA_DIR }}/* {{ prospectus_code_dir }}/public/" + when: PROSPECTUS_RETAIN_CACHE_PUBLIC_DIRS|bool and register_data_dir.stat.exists + + - name: install python3.8 + apt: + pkg: + - python3.8-dev + - python3.8-distutils + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + when: prospectus_use_python3 + tags: + - install + - install:system-requirements + + - name: Add prospectus configuration file + template: + src: ".env.environment.j2" + dest: "{{ prospectus_code_dir }}/config/.env.keys" + mode: "0644" + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + + - name: Install nodeenv + apt: + name: nodeenv + update_cache: yes + become_user: "{{ prospectus_user }}" + environment: "{{ prospectus_env_vars }}" + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + tags: + - install + - install:system-requirements + + # Install node + - name: Create nodeenv + shell: "nodeenv {{ prospectus_nodeenv_dir }} --node={{ PROSPECTUS_NODE_VERSION }} --prebuilt --force" + become_user: "{{ prospectus_user }}" + environment: "{{ prospectus_env_vars }}" + tags: + - install + - install:system-requirements + + - name: copy the template to the desired location + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode }}" + with_items: + - { src: 'prospectus_env.j2', dest: '{{ 
prospectus_app_dir }}/prospectus_env', owner: '{{ prospectus_user }}', group: '{{ prospectus_user }}', mode: '0644' } + + # Set the npm registry + # This needs to be done as root since npm is weird about + # chown - https://github.com/npm/npm/issues/3565 + - name: Set the npm registry + shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'" + args: + creates: "{{ prospectus_code_dir }}/.npmrc" + environment: "{{ prospectus_env_vars }}" + tags: + - install + - install:app-requirements + + # Set the npm registry permissions + - name: Set the npm registry permissions + file: + path: "{{ prospectus_code_dir }}/.npmrc" + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + tags: + - install + - install:app-requirements + + # Install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json + - name: install node dependencies + shell: "{{ prospectus_nodeenv_bin }}/npm install --unsafe-perm=true --allow-root" + args: + chdir: "{{ prospectus_code_dir }}" + environment: "{{ prospectus_env_vars }}" + become_user: "{{ prospectus_user }}" + tags: + - install + - install:app-requirements + - name: Install pngquant + apt: + name: "pngquant" + update_cache: yes + register: install_pkgs + until: install_pkgs is success + retries: 10 + delay: 5 + tags: + - install + - install:system-requirements + +# Install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: run prospectus build + when: PROSPECTUS_ENABLE_NPM_BUILD|bool + shell: "{{ prospectus_nodeenv_bin }}/npm run build" + args: + chdir: "{{ prospectus_code_dir }}" + environment: "{{ prospectus_env_vars }}" + become_user: "{{ prospectus_user }}" + register: result + until: "result is not failed" + retries: 2 + delay: 10 + tags: + - install + - install:app-requirements + +- name: Post process prospectus gatsby build + when: PROSPECTUS_ENABLE_POST_BUILD|bool + block: + - name: Compress images + shell: "find public -name '*.png' -exec pngquant --skip-if-larger --quality 50-75 --ext .png --force -- {} \\;" + args: + chdir: "{{ prospectus_code_dir }}" + become_user: "{{ prospectus_user }}" + + # Copy over the target from the previous build to where it needs to be + - name: Create data folder + file: + path: "{{ PROSPECTUS_DATA_DIR }}" + state: directory + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + + - name: Move prospectus public folder to var folder + shell: "mv {{ prospectus_code_dir }}/public/* {{ PROSPECTUS_DATA_DIR }}" + + - name: Set Prospectus S3 max concurrency + become_user: "{{ prospectus_user }}" + shell: 'aws configure set s3.max_concurrent_requests {{ PROSPECTUS_S3_UPLOAD_MAX_CONCURENCT_REQUESTS }}' + when: PROSPECTUS_S3_UPLOAD_ENABLED|bool + tags: + - install + - install:system-requirements + + - name: Upload prospectus to S3 + become_user: "{{ prospectus_user }}" + shell: "aws s3 sync --quiet {{ PROSPECTUS_DATA_DIR }} s3://{{ PROSPECTUS_S3_HOSTING_BUCKET }}/{{ PROSPECTUS_S3_HOSTING_PREFIX | default(PROSPECTUS_VERSION, true) }}" + when: PROSPECTUS_S3_UPLOAD_ENABLED|bool + tags: + - install + - install:system-requirements + + - name: Upload prospectus hashed static files to S3 + become_user: "{{ prospectus_user }}" + shell: "aws s3 cp --recursive {{ PROSPECTUS_DATA_DIR }} s3://{{ PROSPECTUS_S3_HOSTING_BUCKET }}/static_hashed --exclude '*' --include '/*.css' --include '/*.js' --include '/*.map' --exclude '*/*' --include 'static/*' --include 'page-data/sq/d/*'" + when: 
PROSPECTUS_S3_UPLOAD_ENABLED|bool + tags: + - install + - install:system-requirements + + - name: Download prospectus redirects to GoCD + when: PROSPECTUS_S3_UPLOAD_ENABLED|bool + fetch: + src: "{{ prospectus_redirect_file }}" + dest: "{{ artifact_path }}/prospectus-redirects.conf" + flat: yes diff --git a/playbooks/roles/prospectus/templates/.env.environment.j2 b/playbooks/roles/prospectus/templates/.env.environment.j2 new file mode 100644 index 00000000000..fcc8ba4aed8 --- /dev/null +++ b/playbooks/roles/prospectus/templates/.env.environment.j2 @@ -0,0 +1,16 @@ +# This file is created and updated by ansible + +OAUTH_ID={{ PROSPECTUS_OAUTH_ID }} +OAUTH_SECRET={{ PROSPECTUS_OAUTH_SECRET }} + +NEW_RELIC_LICENSE_KEY={{ PROSPECTUS_NEW_RELIC_LICENSE_KEY }} +NEW_RELIC_APP_ID={{ PROSPECTUS_NEW_RELIC_APP_ID }} + +GATSBY_CONTENTFUL_SPACE_ID={{ PROSPECTUS_CONTENTFUL_SPACE_ID }} +GATSBY_CONTENTFUL_ACCESS_TOKEN={{ PROSPECTUS_CONTENTFUL_ACCESS_TOKEN }} +GATSBY_SEGMENT_WRITE_KEY={{ PROSPECTUS_SEGMENT_WRITE_KEY }} +PROSPECTUS_ALGOLIA_BROWSE_KEY={{ PROSPECTUS_ALGOLIA_BROWSE_KEY }} +PROSPECTUS_ALGOLIA_ADMIN_KEY={{ PROSPECTUS_ALGOLIA_ADMIN_KEY }} +GATSBY_XPERT_STG_API_URL={{ PROSPECTUS_GATSBY_XPERT_STG_API_URL }} +GATSBY_XPERT_UAT_API_URL={{ PROSPECTUS_GATSBY_XPERT_UAT_API_URL }} +GATSBY_XPERT_PROD_API_URL={{ PROSPECTUS_GATSBY_XPERT_PROD_API_URL }} diff --git a/playbooks/roles/prospectus/templates/prospectus_env.j2 b/playbooks/roles/prospectus/templates/prospectus_env.j2 new file mode 100644 index 00000000000..ba116f8e22b --- /dev/null +++ b/playbooks/roles/prospectus/templates/prospectus_env.j2 @@ -0,0 +1,6 @@ +# {{ ansible_managed }} +{% for name,value in prospectus_env_vars.items() %} +{%- if value %} +export {{ name }}="{{ value }}" +{%- endif %} +{% endfor %} diff --git a/playbooks/roles/python/defaults/main.yml b/playbooks/roles/python/defaults/main.yml new file mode 100644 index 00000000000..a020e493d8b --- /dev/null +++ b/playbooks/roles/python/defaults/main.yml @@ -0,0 +1,4 @@ +# Install python2.7 + the /usr/bin/python symlink. + +python_packages: + - python-minimal diff --git a/playbooks/roles/python/tasks/main.yml b/playbooks/roles/python/tasks/main.yml new file mode 100644 index 00000000000..9ca64af1723 --- /dev/null +++ b/playbooks/roles/python/tasks/main.yml @@ -0,0 +1,22 @@ +# Bootstrap packages must be installed with raw commands, because ubuntu +# xenial+ cloud images don't have python2.7 installed, and ansible doesn't yet +# support python3. 
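Since every Ansible module other than raw needs a Python interpreter on the managed host (the setup module included), any play that applies this role must also skip fact gathering. A minimal sketch of such a bootstrap play, with a hypothetical host group name:

    - name: Bootstrap python onto fresh cloud images
      hosts: bootstrap_hosts   # hypothetical group
      gather_facts: false      # fact gathering would fail before python exists
      become: true
      roles:
        - python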
+ +- name: Wait until cloud-init has finished running + raw: test -e /usr/bin/cloud-init && cloud-init status --wait + ignore_errors: yes + +- name: Update apt-get + raw: apt-get update -qq + register: python_update_result + until: python_update_result.rc == 0 + retries: 10 + delay: 10 + +- name: Install packages + raw: "apt-get install -qq {{ item }}" + with_items: "{{ python_packages }}" + register: install_packages + retries: 10 + delay: 10 + until: install_packages is succeeded diff --git a/playbooks/roles/rabbitmq/defaults/main.yml b/playbooks/roles/rabbitmq/defaults/main.yml index 835fa05c8a2..630572de7ae 100644 --- a/playbooks/roles/rabbitmq/defaults/main.yml +++ b/playbooks/roles/rabbitmq/defaults/main.yml @@ -1,42 +1,66 @@ -#Variables for rabbitmq --- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://github.com/openedx/configuration/wiki +# code style: https://github.com/openedx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Defaults for role rabbitmq +# -rabbit_app_dir: "{{ COMMON_APP_DIR }}/rabbitmq" -rabbit_data_dir: "{{ COMMON_DATA_DIR }}/rabbitmq" -rabbit_log_dir: "{{ COMMON_LOG_DIR }}/rabbitmq" -rabbit_cfg_dir: "{{ COMMON_CFG_DIR }}/rabbitmq" +rabbitmq_app_dir: "{{ COMMON_APP_DIR }}/rabbitmq" +rabbitmq_data_dir: "{{ COMMON_DATA_DIR }}/rabbitmq" +rabbitmq_log_dir: "{{ COMMON_LOG_DIR }}/rabbitmq" +rabbitmq_cfg_dir: "{{ COMMON_CFG_DIR }}/rabbitmq" + +rabbitmq_user: "rabbitmq" +rabbitmq_group: "rabbitmq" + +RABBIT_ADMIN_PASSWORD: 'the example admin password' # Environment specific vars RABBIT_ERLANG_COOKIE: 'DEFAULT_COOKIE' RABBIT_USERS: - name: 'admin' - password: 'the example admin password' + password: "{{ RABBIT_ADMIN_PASSWORD }}" - name: 'edx' password: 'edx' - name: 'celery' password: 'celery' -RABBITMQ_CLUSTERED: !!null - RABBITMQ_VHOSTS: - '/' +RABBITMQ_CLUSTERED_HOSTS: [] +# This is the default for rabbit, but allows overriding if +# you run a dedicated rabbit cluster +# https://www.rabbitmq.com/memory.html +# https://www.rabbitmq.com/production-checklist.html +RABBITMQ_VM_MEMORY_HIGH_WATERMARK: 0.4 + +RABBITMQ_VERSION: 3.6.9-1 + +RABBITMQ_CLUSTER_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-rabbit" + # Internal role variables below this line # option to force deletion of the mnesia dir rabbitmq_refresh: false -rabbitmq_apt_key: "http://www.rabbitmq.com/rabbitmq-signing-key-public.asc" -rabbitmq_repository: "deb http://www.rabbitmq.com/debian/ testing main" -rabbitmq_pkg: "rabbitmq-server" +rabbitmq_repo: "deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu/ {{ ansible_distribution_release }} main" +rabbitmq_repo_key: "https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey" + rabbitmq_debian_pkgs: - - python-software-properties + - "apt-transport-https" rabbitmq_config_dir: "/etc/rabbitmq" rabbitmq_cookie_dir: "/var/lib/rabbitmq" -rabbitmq_cookie_location: "{{rabbitmq_cookie_dir}}/.erlang.cookie" +rabbitmq_cookie_location: "{{ rabbitmq_cookie_dir }}/.erlang.cookie" -rabbitmq_mnesia_folder: "{{rabbitmq_cookie_dir}}/mnesia" +rabbitmq_mnesia_folder: "{{ rabbitmq_cookie_dir }}/mnesia" rabbitmq_port: 5672 rabbitmq_management_port: 15672 @@ -44,10 +68,10 @@ rabbitmq_ip: "{{ ansible_default_ipv4.address }}" # Structure for auth config file. 
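One note before the auth structure below: entries in the new RABBITMQ_CLUSTERED_HOSTS list must carry the Erlang node-name prefix, as the comment in rabbitmq.config.j2 later in this diff spells out. A hypothetical two-node override would look like:

    RABBITMQ_CLUSTERED_HOSTS:
      - 'rabbit@ip-10-0-0-10'   # hypothetical node names
      - 'rabbit@ip-10-0-0-11'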
rabbitmq_auth_config: - erlang_cookie: $RABBIT_ERLANG_COOKIE - admins: $RABBIT_USERS - -rabbitmq_clustered_hosts: [] + erlang_cookie: "{{ RABBIT_ERLANG_COOKIE }}" + admins: "{{ RABBIT_USERS }}" rabbitmq_plugins: - rabbitmq_management + +rabbitmq_cron_timeout: 5 diff --git a/playbooks/roles/rabbitmq/tasks/main.yml b/playbooks/roles/rabbitmq/tasks/main.yml index bcb7d719b2f..575bb133dca 100644 --- a/playbooks/roles/rabbitmq/tasks/main.yml +++ b/playbooks/roles/rabbitmq/tasks/main.yml @@ -2,105 +2,328 @@ # It is recommended that this role be played with serial set to 1 because # There is a bug with initializing multiple nodes in the HA cluster at once # http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html +# +- name: install packages needed by rabbit + apt: + name: "{{ rabbitmq_debian_pkgs }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + tags: + - install + - install:app-requirements + +- name: trust rabbit's packagecloud repository + apt_key: + url: "{{ rabbitmq_repo_key }}" + state: present + tags: + - "install" + - "install:app-requirements" -- name: trust rabbit repository - apt_key: url={{rabbitmq_apt_key}} state=present +- name: add rabbit's packagecloud repository + apt_repository: + repo: "{{ rabbitmq_repo }}" + state: present + update_cache: yes + tags: + - "install" + - "install:app-requirements" -- name: install python-software-properties if debian - apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present +# If we don't set pipefail first, `||` will be looking at the exit code of the last command in the pipe +- name: Check if rabbit is installed + shell: | + set -o pipefail + dpkg -s rabbitmq-server | grep Version | sed -r 's/.*: (.*)/\1/' || echo 'not installed' + args: + executable: /bin/bash + register: installed_version + tags: + - "install" + - "install:app-requirements" -- name: add rabbit repository - apt_repository: repo="{{rabbitmq_repository}}" state=present +- name: Warn if wrong rabbit version is installed + debug: msg="Expected rabbitmq version {{ RABBITMQ_VERSION }}, found {{ installed_version.stdout }} - will not upgrade in place" + when: installed_version.stdout is defined and installed_version.stdout not in [RABBITMQ_VERSION, 'not installed'] + tags: + - "install" + - "install:app-requirements" -- name: install rabbitmq - apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes +- name: Install rabbit package + apt: + name: "rabbitmq-server={{ RABBITMQ_VERSION }}" + state: present + force: yes + update_cache: yes + when: installed_version.stdout is defined and installed_version.stdout == "not installed" + tags: + - "install" + - "install:app-requirements" -- name: stop rabbit cluster - service: name=rabbitmq-server state=stopped +- name: Stop rabbit cluster + service: + name: rabbitmq-server + state: stopped + tags: + - "install" + - "install:app-configuration" -# in case there are lingering processes, ignore errors +# In case there are lingering processes, ignore errors # silently -- name: send sigterm to any running rabbitmq processes - shell: pkill -u rabbitmq || true +- name: Send sigterm to any running rabbitmq processes + shell: "pkill -u rabbitmq || true" + tags: + - "install" + - "install:app-configuration" + +- name: Create rabbitmq edx directories + file: + path: "{{ item }}" + owner: "{{ rabbitmq_user }}" + mode: "0755" + state: directory + with_items: + - "{{ rabbitmq_app_dir }}" + - "{{ rabbitmq_log_dir }}" + tags: + - "install" + - "install:app-configuration" + +- 
name: Add queue monitoring script
+  template:
+    src: "edx/app/rabbitmq/log-rabbitmq-queues.sh.j2"
+    dest: "{{ rabbitmq_app_dir }}/log-rabbitmq-queues.sh"
+    owner: "{{ rabbitmq_user }}"
+    group: "{{ rabbitmq_group }}"
+    mode: "0755"
+  tags:
+    - "install"
+    - "install:app-configuration"
+    - "monitoring"
+
+- name: Add RabbitMQ memory usage script
+  template:
+    src: "edx/app/rabbitmq/log-rabbitmq-memory.sh.j2"
+    dest: "{{ rabbitmq_app_dir }}/log-rabbitmq-memory.sh"
+    owner: "{{ rabbitmq_user }}"
+    group: "{{ rabbitmq_group }}"
+    mode: "0775"
+  tags:
+    - "install"
+    - "install:app-configuration"
+    - "monitoring"
+
+- name: Set up a cron job to run queue script
+  cron:
+    name: "log-queue-lengths"
+    job: /usr/bin/timeout {{ rabbitmq_cron_timeout }} {{ rabbitmq_app_dir }}/log-rabbitmq-queues.sh >/dev/null 2>&1
+  tags:
+    - "install"
+    - "install:app-configuration"
+    - "monitoring"
+
+- name: Set up a cron job to run the memory script
+  cron:
+    name: "log-rabbitmq-memory-usage"
+    job: /usr/bin/timeout {{ rabbitmq_cron_timeout }} {{ rabbitmq_app_dir }}/log-rabbitmq-memory.sh >/dev/null 2>&1
+  tags:
+    - "install"
+    - "install:app-configuration"
+    - "monitoring"
+
+- name: install logrotate configuration
+  template:
+    src: etc/logrotate.d/rabbitmq.j2
+    dest: /etc/logrotate.d/rabbitmq
+  tags:
+    - "install"
+    - "install:app-configuration"
+    - "logrotate"
 
 # Defaulting to /var/lib/rabbitmq
-- name: create cookie directory
-  file: >
-    path={{rabbitmq_cookie_dir}}
-    owner=rabbitmq group=rabbitmq mode=0755 state=directory
-
-- name: add rabbitmq erlang cookie
-  template: >
-    src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}}
-    owner=rabbitmq group=rabbitmq mode=0400
+- name: Create cookie directory
+  file:
+    path: "{{ rabbitmq_cookie_dir }}"
+    state: directory
+    owner: rabbitmq
+    group: rabbitmq
+    mode: "0755"
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: Add rabbitmq erlang cookie
+  template:
+    src: "erlang.cookie.j2"
+    dest: "{{ rabbitmq_cookie_location }}"
+    owner: rabbitmq
+    group: rabbitmq
+    mode: "0400"
   register: erlang_cookie
+  tags:
+    - "install"
+    - "install:app-configuration"
 
 # Defaulting to /etc/rabbitmq
-- name: create rabbitmq config directory
-  file: >
-    path={{rabbitmq_config_dir}}
-    owner=root group=root mode=0755 state=directory
-
-- name: add rabbitmq environment configuration
-  template: >
-    src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf
-    owner=root group=root mode=0644
-
-- name: add rabbitmq cluster configuration
-  template: >
-    src=etc/rabbitmq/rabbitmq.config.j2
-    dest={{rabbitmq_config_dir}}/rabbitmq.config
-    owner=root group=root mode=0644
+- name: Create rabbitmq config directory
+  file:
+    path: "{{ rabbitmq_config_dir }}"
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: Add rabbitmq environment configuration
+  template:
+    src: "rabbitmq-env.conf.j2"
+    dest: "{{ rabbitmq_config_dir }}/rabbitmq-env.conf"
+    owner: root
+    group: root
+    mode: "0644"
+  tags:
+    - "install"
+    - "install:app-configuration"
+
+- name: Add rabbitmq cluster configuration
+  template:
+    src: "etc/rabbitmq/rabbitmq.config.j2"
+    dest: "{{ rabbitmq_config_dir }}/rabbitmq.config"
+    owner: root
+    group: root
+    mode: "0644"
   register: cluster_configuration
+  tags:
+    - "install"
+    - "install:app-configuration"
 
-- name: install plugins
+- name: Install plugins
   rabbitmq_plugin:
-    names={{",".join(rabbitmq_plugins)}} state=enabled
+    names: "{{ item }}"
+    state: enabled
+  with_items: "{{ rabbitmq_plugins
}}" + tags: + - "install" + - "install:app-configuration" # When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'. # This folder should be deleted before clustering is setup because it retains data # that can conflict with the clustering information. -- name: remove mnesia configuration - file: path={{rabbitmq_mnesia_folder}} state=absent +- name: Remove mnesia configuration + file: + path: "{{ rabbitmq_mnesia_folder }}" + state: absent when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh + tags: + - "install" + - "install:app-configuration" -- name: start rabbit nodes - service: name=rabbitmq-server state=restarted +- name: Start rabbit nodes + service: + name: rabbitmq-server + state: started + tags: + - "install" + - "install:app-configuration" -- name: wait for rabbit to start - wait_for: port={{ rabbitmq_management_port }} delay=2 +- name: Wait for rabbit to start + wait_for: + port: "{{ rabbitmq_management_port }}" + delay: 2 + tags: + - "install" + - "install:app-configuration" -- name: remove guest user - rabbitmq_user: user="guest" state=absent +- name: Remove guest user + rabbitmq_user: + user: "guest" + state: absent + tags: + - users + - maintenance + - "manage" + - "manage:app-users" -- name: add vhosts - rabbitmq_vhost: name={{ item }} state=present - with_items: RABBITMQ_VHOSTS +- name: Add vhosts + rabbitmq_vhost: + name: "{{ item }}" + state: present + with_items: "{{ RABBITMQ_VHOSTS }}" + tags: + - vhosts + - maintenance + - "install" + - "install:app-configuration" -- name: add admin users - rabbitmq_user: > - user='{{item[0].name}}' password='{{item[0].password}}' - read_priv='.*' write_priv='.*' - configure_priv='.*' tags="administrator" state=present - vhost={{ item[1] }} - with_nested: - - ${rabbitmq_auth_config.admins} - - RABBITMQ_VHOSTS +- set_fact: + permissions: "{{ permissions|default([])+[{'vhost':item,'configure_priv':'.*','read_priv':'.*','write_priv':'.*'}] }}" + with_items: + - "{{ RABBITMQ_VHOSTS }}" + tags: + - users + - maintenance + - "manage" + - "manage:app-users" + +- name: Add admin users + rabbitmq_user: + user: "{{ item.name }}" + password: "{{ item.password }}" + tags: "administrator" + state: "{{ item.state | default('present') }}" + permissions: "{{ permissions }}" + with_items: + - "{{rabbitmq_auth_config.admins}}" when: "'admins' in rabbitmq_auth_config" + tags: + - users + - maintenance + - "manage" + - "manage:app-users" -- name: make queues mirrored - shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'" - when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1 +- name: Make queues mirrored + rabbitmq_policy: + name: HA + pattern: .* + vhost: "{{ item }}" + args: + tags: + ha-mode: all + ha-sync-mode: automatic + with_items: "{{ RABBITMQ_VHOSTS }}" + when: RABBITMQ_CLUSTERED_HOSTS|length > 1 + tags: + - ha + - maintenance + - "install" + - "install:app-configuration" # # Depends upon the management plugin # -- name: install admin tools - get_url: > - url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin - dest=/usr/local/bin/rabbitmqadmin - -- name: ensure rabbitmqadmin attributes - file: > - path=/usr/local/bin/rabbitmqadmin owner=root - group=root mode=0655 +- name: Install admin tools + get_url: + url: "http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin" + dest: "/usr/local/bin/rabbitmqadmin" + tags: + - "install" + - "install:app-configuration" + +- name: Ensure rabbitmqadmin attributes + file: + path: 
"/usr/local/bin/rabbitmqadmin" + owner: root + group: root + mode: "0655" + tags: + - "install" + - "install:app-configuration" + +- name: Set cluster name + command: "/usr/sbin/rabbitmqctl set_cluster_name {{ RABBITMQ_CLUSTER_NAME }}" + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/rabbitmq/templates/edx/app/rabbitmq/log-rabbitmq-memory.sh.j2 b/playbooks/roles/rabbitmq/templates/edx/app/rabbitmq/log-rabbitmq-memory.sh.j2 new file mode 100644 index 00000000000..6fffef0e90b --- /dev/null +++ b/playbooks/roles/rabbitmq/templates/edx/app/rabbitmq/log-rabbitmq-memory.sh.j2 @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -x + +log_directory={{ rabbitmq_log_dir }} + +{% raw %} +MemTotal=`grep 'MemTotal' /proc/meminfo | awk '{print $2}'` +memusg=`/usr/sbin/rabbitmqctl status | grep total | awk -F',|}' -v date="$(date)" -v MemTotal="$MemTotal" 'NR==1{printf date"\tRabbitMQ Memory Usage:(%%)\t" ((($2/1024)/MemTotal)*100)}'` +echo $memusg >> "$log_directory/rabbitmq.memory.log" +{% endraw %} diff --git a/playbooks/roles/rabbitmq/templates/edx/app/rabbitmq/log-rabbitmq-queues.sh.j2 b/playbooks/roles/rabbitmq/templates/edx/app/rabbitmq/log-rabbitmq-queues.sh.j2 new file mode 100644 index 00000000000..2efa27a6097 --- /dev/null +++ b/playbooks/roles/rabbitmq/templates/edx/app/rabbitmq/log-rabbitmq-queues.sh.j2 @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -x + +vpc_name={{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }} +log_directory={{ rabbitmq_log_dir }} + + +{% raw %} +OLD_IFS=$IFS +IFS=$'\n' +vhosts=`/usr/sbin/rabbitmqctl list_vhosts | grep "^/"` +for vhost in $vhosts; do + queues=`/usr/sbin/rabbitmqctl list_queues -p $vhost | awk 'NF==2{ print }'` + mkdir -p ${log_directory}/${vhost} + for queue in $queues; do + queue_name=`echo $queue | awk '{ print $1 }'` + echo $queue | sed 's/\s*/ /' | awk -v date="$(date)" -v vhost="$vhost" '{ print "date=\x27"date"\x27","vhost=\x27"vhost"\x27","queue=\x27"$1"\x27","length="$2}' >> ${log_directory}/${vhost}/${queue_name}.log + done +done +IFS=$OLD_IFS +{% endraw %} diff --git a/playbooks/roles/rabbitmq/templates/etc/logrotate.d/rabbitmq.j2 b/playbooks/roles/rabbitmq/templates/etc/logrotate.d/rabbitmq.j2 new file mode 100644 index 00000000000..89e86ca3bac --- /dev/null +++ b/playbooks/roles/rabbitmq/templates/etc/logrotate.d/rabbitmq.j2 @@ -0,0 +1,11 @@ +# We want to hit the top level queues and any vhost queues +# such as fulfillment +{{ rabbitmq_log_dir }}/*.log {{ rabbitmq_log_dir }}/*/*.log { + compress + dateext + dateformat -%Y%m%d-%s + missingok + daily + rotate 3 + nocreate +} diff --git a/playbooks/roles/rabbitmq/templates/etc/rabbitmq/rabbitmq.config.j2 b/playbooks/roles/rabbitmq/templates/etc/rabbitmq/rabbitmq.config.j2 index ff7eef4e294..b78845d5362 100644 --- a/playbooks/roles/rabbitmq/templates/etc/rabbitmq/rabbitmq.config.j2 +++ b/playbooks/roles/rabbitmq/templates/etc/rabbitmq/rabbitmq.config.j2 @@ -1,20 +1,10 @@ % {{ ansible_managed }} -{% if RABBITMQ_CLUSTERED -%} - {%- set hosts= [] -%} - - {%- for host in hostvars.keys() -%} - {% do hosts.append("rabbit@ip-" + host.replace('.','-')) %} - {%- endfor %} - -[{rabbit, - [{cluster_nodes, {['{{ hosts|join("\',\'") }}'], disc}}]}]. - -{%- else -%} -{# If rabbitmq_clustered_hosts is set, use that instead assuming an aws stack. +[{rabbit, [ + {log_levels, [{connection, info}]}, +{# Note: That these names should include the node name prefix. eg. 'rabbit@hostname' #} -[{rabbit, - [{cluster_nodes, {['{{ rabbitmq_clustered_hosts|join("\',\'") }}'], disc}}]}]. 
- -{%- endif -%} + {cluster_nodes, {['{{ RABBITMQ_CLUSTERED_HOSTS|join("\',\'") }}'], disc}}, + {vm_memory_high_watermark, {{ RABBITMQ_VM_MEMORY_HIGH_WATERMARK }} } +]}]. diff --git a/playbooks/roles/rbenv/defaults/main.yml b/playbooks/roles/rbenv/defaults/main.yml index e48136c3594..5350faac24b 100644 --- a/playbooks/roles/rbenv/defaults/main.yml +++ b/playbooks/roles/rbenv/defaults/main.yml @@ -1,11 +1,13 @@ --- -rbenv_version: 'v0.4.0' -rbenv_bundler_version: '1.3.2' -rbenv_rake_version: '10.0.3' +RBENV_VERSION: 'v1.0.0' +RBENV_BUNDLER_VERSION: '2.3.4' +RBENV_RAKE_VERSION: '13.0.6' rbenv_root: "{{ rbenv_dir }}/.rbenv" rbenv_gem_root: "{{ rbenv_dir }}/.gem" rbenv_gem_bin: "{{ rbenv_gem_root }}/bin" +rbenv_gemfile: 'Gemfile3' +RBENV_RUBYGEMS_VERSION: '3.2.33' rbenv_bin: "{{ rbenv_dir }}/.rbenv/bin" rbenv_shims: "{{ rbenv_root }}/shims" rbenv_path: "{{ rbenv_bin }}:{{ rbenv_shims }}:{{ rbenv_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -19,6 +21,7 @@ rbenv_debian_pkgs: - libxslt1-dev - zlib1g-dev rbenv_environment: + BUNDLE_GEMFILE: "{{ rbenv_gemfile }}" RBENV_ROOT: "{{ rbenv_root }}" GEM_ROOT: "{{ rbenv_gem_root }}" GEM_HOME: "{{ rbenv_gem_root }}" diff --git a/playbooks/roles/rbenv/tasks/main.yml b/playbooks/roles/rbenv/tasks/main.yml index b1b042ad04e..23589323f96 100644 --- a/playbooks/roles/rbenv/tasks/main.yml +++ b/playbooks/roles/rbenv/tasks/main.yml @@ -13,7 +13,7 @@ # - role: rbenv # rbenv_user: "{{ forum_user }}" # rbenv_dir: "{{ forum_rbenv_dir }}" -# rbenv_ruby_version: "{{ forum_ruby_version }}" +# rbenv_ruby_version: "{{ FORUM_RUBY_VERSION }}" # # Parameters: # @@ -25,104 +25,178 @@ # with a number of changes. # -- fail: rbenv_user required for role +- fail: + msg: "rbenv_user required for role" when: rbenv_user is not defined -- fail: rbenv_dir required for role +- fail: + msg: "rbenv_dir required for role" when: rbenv_dir is not defined -- fail: rbenv_ruby_version required for role +- fail: + msg: "rbenv_ruby_version required for role" when: rbenv_ruby_version is not defined - name: create rbenv user {{ rbenv_user }} - user: > - name={{ rbenv_user }} home={{ rbenv_dir }} - shell=/bin/false createhome=no + user: + name: "{{ rbenv_user }}" + home: "{{ rbenv_dir }}" + shell: /bin/false + createhome: no when: rbenv_user != common_web_user + tags: + - install + - install:base - name: create rbenv dir if it does not exist - file: > - path="{{ rbenv_dir }}" owner="{{ rbenv_user }}" - state=directory + file: + path: "{{ rbenv_dir }}" + owner: "{{ rbenv_user }}" + state: directory + tags: + - install + - install:base - name: install build depends - apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no - with_items: rbenv_debian_pkgs + apt: pkg={{ ",".join(rbenv_debian_pkgs) }} update_cache=yes state=present install_recommends=no + with_items: "{{ rbenv_debian_pkgs }}" + tags: + - install + - install:base - name: update rbenv repo - git: > - repo=https://github.com/sstephenson/rbenv.git - dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }} - sudo_user: "{{ rbenv_user }}" + git: + repo: https://github.com/sstephenson/rbenv.git + dest: "{{ rbenv_dir }}/.rbenv" + version: "{{ RBENV_VERSION }}" + accept_hostkey: yes + become_user: "{{ rbenv_user }}" + tags: + - install + - install:base - name: ensure ruby_env exists - template: > - src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env - sudo_user: "{{ rbenv_user }}" + template: + src: ruby_env.j2 + dest: "{{ rbenv_dir }}/ruby_env" + become_user: "{{ rbenv_user }}" + tags: + - install + 
- install:base - name: check ruby-build installed command: test -x /usr/local/bin/ruby-build register: rbuild_present ignore_errors: yes + tags: + - install + - install:base - name: if ruby-build exists, which versions we can install command: /usr/local/bin/ruby-build --definitions - when: rbuild_present|success + when: rbuild_present is succeeded register: installable_ruby_vers ignore_errors: yes + tags: + - install + - install:base ### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version - name: create temporary directory command: mktemp -d register: tempdir - sudo_user: "{{ rbenv_user }}" - when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) + become_user: "{{ rbenv_user }}" + when: rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) + tags: + - install + - install:base - name: clone ruby-build repo - git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build - when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) - sudo_user: "{{ rbenv_user }}" + git: + repo: https://github.com/sstephenson/ruby-build.git + dest: "{{ tempdir.stdout }}/ruby-build" + accept_hostkey: yes + when: tempdir.stdout is defined and (rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) + become_user: "{{ rbenv_user }}" + tags: + - install + - install:base - name: install ruby-build command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build - when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) + when: tempdir.stdout is defined and (rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) + tags: + - install + - install:base - name: remove temporary directory file: path={{ tempdir.stdout }} state=absent - when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) + when: tempdir.stdout is defined and (rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) + tags: + - install + - install:base - name: check ruby {{ rbenv_ruby_version }} installed shell: "rbenv versions | grep {{ rbenv_ruby_version }}" register: ruby_installed - sudo_user: "{{ rbenv_user }}" + become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" ignore_errors: yes + tags: + - install + - install:base - name: install ruby {{ rbenv_ruby_version }} shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}" - when: ruby_installed|failed - sudo_user: "{{ rbenv_user }}" + when: ruby_installed is failed + become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" + tags: + - install + - install:base - name: set global ruby {{ rbenv_ruby_version }} shell: "rbenv global {{ rbenv_ruby_version }}" - sudo_user: "{{ rbenv_user }}" + become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" + tags: + - install + - install:base + +- name: update rubygems + shell: "gem update --system {{ RBENV_RUBYGEMS_VERSION }}" + become_user: "{{ rbenv_user }}" + environment: "{{ rbenv_environment }}" + tags: + - install + - install:base - name: install bundler - shell: "gem install bundler -v {{ 
rbenv_bundler_version }}" - sudo_user: "{{ rbenv_user }}" + shell: "gem install bundler -v {{ RBENV_BUNDLER_VERSION }}" + become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" + tags: + - install + - install:base - name: remove rbenv version of rake file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent + tags: + - install + - install:base - name: install rake gem - shell: "gem install rake -v {{ rbenv_rake_version }}" - sudo_user: "{{ rbenv_user }}" + shell: "gem install rake -v {{ RBENV_RAKE_VERSION }}" + become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" + tags: + - install + - install:base - name: rehash shell: "rbenv rehash" - sudo_user: "{{ rbenv_user }}" + become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" + tags: + - install + - install:base diff --git a/playbooks/roles/redis/defaults/main.yml b/playbooks/roles/redis/defaults/main.yml new file mode 100644 index 00000000000..595d002f6c9 --- /dev/null +++ b/playbooks/roles/redis/defaults/main.yml @@ -0,0 +1,37 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role redis +# +REDIS_PASSWORD: !!null +REDIS_BIND_IP: 127.0.0.1 +REDIS_PERSISTENCE_DIR: "/var/lib/redis" +REDIS_MEMORY_LIMIT: "512mb" +REDIS_MAX_MEMORY_POLICY: "noeviction" +# +# vars are namespace with the module name. +# +redis_role_name: redis + +redis_user: redis +redis_group: redis + +# +# OS packages +# + +REDIS_REPO: "deb https://packages.redis.io/deb {{ ansible_distribution_release }} main" +REDIS_VERSION: "6:7.2.0-1rl1~focal1" + +redis_debian_pkgs: + - "redis-tools={{ REDIS_VERSION }}" + - "redis-server={{ REDIS_VERSION }}" + +redis_redhat_pkgs: [] diff --git a/playbooks/roles/redis/handlers/main.yml b/playbooks/roles/redis/handlers/main.yml new file mode 100644 index 00000000000..dbdbf38640c --- /dev/null +++ b/playbooks/roles/redis/handlers/main.yml @@ -0,0 +1,20 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Handlers for role redis +# +# Overview: +# +# +- name: reload redis + service: + name: redis-server + state: restarted diff --git a/playbooks/roles/redis/meta/main.yml b/playbooks/roles/redis/meta/main.yml new file mode 100644 index 00000000000..4c1995126fa --- /dev/null +++ b/playbooks/roles/redis/meta/main.yml @@ -0,0 +1,23 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role redis +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - common diff --git a/playbooks/roles/redis/tasks/main.yml b/playbooks/roles/redis/tasks/main.yml new file mode 100644 index 00000000000..7d46a71baa9 --- /dev/null +++ b/playbooks/roles/redis/tasks/main.yml @@ -0,0 
+1,69 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role redis +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# +# + +- name: add the redis repo signing key + apt_key: + url: "https://packages.redis.io/gpg" + state: present + retries: 3 + register: add_repo_signing_key + tags: + - "install" + - "install:app-requirements" + until: add_repo_signing_key is succeeded + +- name: add the redis repo to the sources list + apt_repository: + repo: "{{ REDIS_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + +- name: Install redis packages + apt: + name: "{{ redis_debian_pkgs }}" + install_recommends: yes + state: present + update_cache: yes + notify: + - reload redis + +- name: Pin redis package version + loop: "{{ redis_debian_pkgs }}" + dpkg_selections: + # Dpkg selection just wants the package name, not the package and version + # This turns "redis-server=6:7.2.0-1rl1~focal1" into just "redis-server" + name: "{{ item.split('=')[0] }}" + selection: hold + +- name: Update redis configuration + template: + src: "etc/redis/redis.conf.j2" + dest: "/etc/redis/redis.conf" + owner: root + group: "{{ redis_group }}" + mode: "0640" + notify: + - reload redis + diff --git a/playbooks/roles/redis/templates/etc/redis/redis.conf.j2 b/playbooks/roles/redis/templates/etc/redis/redis.conf.j2 new file mode 100644 index 00000000000..2bd794ba495 --- /dev/null +++ b/playbooks/roles/redis/templates/etc/redis/redis.conf.j2 @@ -0,0 +1,2056 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives.
+# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# Each address can be prefixed by "-", which means that redis will not fail to +# start if the address is not available. Being not available only refers to +# addresses that do not correspond to any network interface. Addresses that +# are already in use will always fail, and unsupported protocols will always be +# silently skipped. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses +# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 +# bind * -::* # like the default, all available interfaces +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis +# will only be able to accept client connections from the same host that it is +# running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT OUT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind {{ REDIS_BIND_IP }} + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /run/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication.
This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection twice that time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure an X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Normally Redis uses the same certificate for both server functions (accepting +# connections) and client functions (replicating from a master, establishing +# cluster bus connections, etc.). +# +# Sometimes certificates are issued with attributes that designate them as +# client-only or server-only certificates. In that case it may be desired to use +# different certificates for incoming (server) and outgoing (client) +# connections. To do that, use the following directives: +# +# tls-client-cert-file client.crt +# tls-client-key-file client.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-client-key-file-pass secret + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. +# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers.
See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# When Redis is supervised by upstart or systemd, this parameter has no impact. +daemonize yes + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# on startup, and updating Redis status on a regular +# basis. +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +# +# The default is "no". To run under upstart/systemd, you can simply uncomment +# the line below: +# +supervised auto + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +# +# Note that on modern Linux systems "/run/redis.pid" is more conforming +# and should be used instead. +pidfile /run/redis/redis-server.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. 
Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile /var/log/redis/redis-server.log + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# To disable the built-in crash log, which will possibly produce cleaner core +# dumps when they are needed, uncomment the following: +# +# crash-log-enabled no + +# To disable the fast memory check that's run as part of the crash log, which +# will possibly let redis terminate sooner, uncomment the following: +# +# crash-memcheck-enabled no + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT <dbid> where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY and syslog logging is +# disabled. Basically this means that normally a logo is displayed only in +# interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show an +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo no + +# By default, Redis modifies the process title (as seen in 'top' and 'ps') to +# provide some runtime information. It is possible to disable this and leave +# the process name as executed by setting the following to no. +set-proc-title yes + +# When changing the process title, Redis uses the following template to construct +# the modified title. +# +# Template variables are specified in curly brackets. The following variables are +# supported: +# +# {title} Name of process as executed if parent, or type of child process. +# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or +# Unix socket if only that's available. +# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]". +# {port} TCP port listening on, or 0. +# {tls-port} TLS port listening on, or 0. +# {unixsocket} Unix domain socket listening on, or "". +# {config-file} Name of configuration file used. +# +proc-title-template "{title} {listen-addr} {server-mode}" + +################################ SNAPSHOTTING ################################ + +# Save the DB to disk. +# +# save <seconds> <changes> +# +# Redis will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# Snapshotting can be completely disabled with a single empty string argument +# as in following example: +# +# save "" +# +# Unless specified otherwise, by default Redis will save the DB: +# * After 3600 seconds (an hour) if at least 1 key changed +# * After 300 seconds (5 minutes) if at least 100 keys changed +# * After 60 seconds if at least 10000 keys changed +# +# You can set these explicitly by uncommenting the three following lines. +# +# save 3600 1 +# save 300 100 +# save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen.
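+# +# (Illustrative note added in this template, not part of the upstream file: +# when this protection triggers, clients see write commands rejected with an +# error reply along the lines of "MISCONF Redis is configured to save RDB +# snapshots, but it is currently not able to persist on disk".)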
+# +# If the background saving process starts working again, Redis will +# automatically allow writes again. +# +# However if you have set up proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dumping .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performance. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# Enables or disables full sanitation checks for ziplist and listpack etc when +# loading an RDB or RESTORE payload. This reduces the chances of an assertion or +# crash later on while processing commands. +# Options: +# no - Never perform full sanitation +# yes - Always perform full sanitation +# clients - Perform full sanitation only for user connections. +# Excludes: RDB files, RESTORE commands received from the master +# connection, and client connections which have the +# skip-sanitize-payload ACL flag. +# The default should be 'clients' but since it currently affects cluster +# resharding via MIGRATE, it is temporarily set to 'no' by default. +# +# sanitize-dump-payload no + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise it is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir {{ REDIS_PERSISTENCE_DIR }} + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof <masterip> <masterport> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth <master-password> +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser <username> +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH <user> <pass>. + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all.
+# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if you know what you are doing. +# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and slave buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep a copy of the current db contents in RAM while parsing +# the data directly from the socket. Note that this requires +# sufficient memory, if you don't have it, you risk an OOM kill. +repl-diskless-load disabled + +# Replicas send PINGs to server in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# ----------------------------------------------------------------------------- +# By default, Redis Sentinel includes all replicas in its reports. A replica +# can be excluded from Redis Sentinel's announcements. An unannounced replica +# will be ignored by the 'sentinel replicas <mastername>' command and won't be +# exposed to Redis Sentinel's clients. +# +# This option does not change the behavior of replica-priority. Even with +# replica-announced set to 'no', the replica can be promoted to master. To +# prevent this behavior, set replica-priority to 0.
+# +# replica-announced yes + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, with a lag less than or equal to M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# a radix tree indexed by key name, what clients have which keys. In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values.
Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user <username> ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# skip-sanitize-payload RESTORE dump-payload sanitation is skipped. +# sanitize-payload RESTORE dump-payload is sanitized (default). +# +<command> Allow the execution of that command +# -<command> Disallow the execution of that command +# +@<category> Allow the execution of all the commands in such category, +# with valid categories like @admin, @set, @sortedset, ... +# and so forth; see the full list in the server.c file where +# the Redis command table is described and defined. +# The special category @all means all the commands, both the ones +# currently present in the server and the ones that will be loaded +# in the future via modules. +# +<command>|subcommand Allow a specific subcommand of an otherwise +# disabled command. Note that this form is not +# allowed as negative like -DEBUG|SEGFAULT, but +# only additive starting with "+". +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~<pattern> Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns.
+# & Add a glob-style pattern of Pub/Sub channels that can be +# accessed by the user. It is possible to specify multiple channel +# patterns. +# allchannels Alias for &* +# resetchannels Flush the list of allowed channel patterns. +# > Add this password to the list of valid password for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# < Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start. +# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH as usually, or more explicitly with AUTH default +# if they follow the new protocol: both will work. 
+# +# requirepass is not compatible with the aclfile option and the ACL LOAD +# command; these will cause requirepass to be ignored. +# +{% if REDIS_PASSWORD %} +{# Quote the password in case it contains spaces. #} +requirepass "{{ REDIS_PASSWORD }}" +{% else %} +# requirepass foobared +{% endif %} + +# New users are initialized with restrictive permissions by default, via the +# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it +# is possible to manage access to Pub/Sub channels with ACL rules as well. The +# default Pub/Sub channels permission for new users is controlled by the +# acl-pubsub-default configuration directive, which accepts one of these values: +# +# allchannels: grants access to all Pub/Sub channels +# resetchannels: revokes access to all Pub/Sub channels +# +# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default +# defaults to the 'allchannels' permission. +# +# Future compatibility note: it is very likely that in a future version of Redis +# the directive's default of 'allchannels' will be changed to 'resetchannels' in +# order to provide better out-of-the-box Pub/Sub security. Therefore, it is +# recommended that you explicitly define Pub/Sub permissions for all users +# rather than rely on implicit default values. Once you've set explicit +# Pub/Sub permissions for all existing users, you should uncomment the following +# line. +# +# acl-pubsub-default resetchannels + +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. +# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +maxmemory {{ REDIS_MEMORY_LIMIT }} + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# LRU, LFU and volatile-ttl are all implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, when there are no suitable keys for +# eviction, Redis will return an error on write operations that require +# more memory. These are usually commands that create new keys, add data or +# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, +# SORT (due to the STORE argument), and EXEC (if the transaction includes any +# command that requires memory). +# +# The default is: +# +maxmemory-policy {{ REDIS_MAX_MEMORY_POLICY }} + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 approximates true LRU +# very closely but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Eviction processing is designed to function well with the default setting. +# If there is an unusually large amount of write traffic, this value may need to +# be increased.
Decreasing this value may reduce latency at the risk of +# less effective eviction processing. +# 0 = minimum latency, 10 = default, 100 = process without regard to latency +# +# maxmemory-eviction-tenacity 10 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end up using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire cycle". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
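+# +# (Illustrative example added in this template, not part of the upstream +# file: from redis-cli, "DEL bigkey" blocks the server while the value is +# reclaimed, "UNLINK bigkey" returns immediately and frees the memory in a +# background thread, and "FLUSHALL ASYNC" flushes all databases without +# blocking.) +#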
+# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# as if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way, as if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for cases where replacing the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous +# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the +# commands. When neither flag is passed, this directive will be used to determine +# if the data should be deleted asynchronously. + +lazyfree-lazy-user-flush no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since writing in particular is slow, Redis users +# normally use pipelining in order to speed up Redis performance per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speed up Redis by a factor of two without +# resorting to pipelining or sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only on machines +# that have 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a fairly large percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance, if you have a four core box, try to use 2 or 3 I/O +# threads; if you have 8 cores, try to use 6 threads.
In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also, this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports three options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative", see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. +oom-score-adj-values 0 200 800 + + +#################### KERNEL transparent hugepage CONTROL ###################### + +# Usually the kernel Transparent Huge Pages control is set to "madvise" or +# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which +# case this config has no effect. On systems in which it is set to "always", +# redis will attempt to disable it specifically for the redis process in order +# to avoid latency problems specifically with fork(2) and CoW. +# If for some reason you prefer to keep it enabled, you can set this config to +# "no" and the kernel global to "always". + +disable-thp yes + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result in a few minutes of writes being lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability.
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data to disk
+# instead of waiting for more data in the output buffer. Some OSes will really
+# flush data to disk, while others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no", which will let the operating system flush the output buffer when
+# it wants, for better performance (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always", which is very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no", which is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file by implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size has
+# grown by more than the specified percentage, the rewrite is triggered. Also,
+# you need to specify a minimal size for the AOF file to be rewritten; this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
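+
+# For example, with the two defaults above: if the AOF measured 64mb after
+# the last rewrite, a BGREWRITEAOF is triggered once the file grows past
+# 128mb (a 100% increase); below the 64mb floor no automatic rewrite fires,
+# whatever the percentage growth.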
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle,
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on, the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node, enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
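+
+# Taken together, a minimal sketch of the directives for one cluster node
+# (values are illustrative, not defaults of this file) would be:
+#
+# cluster-enabled yes
+# cluster-config-file nodes-6379.conf
+# cluster-node-timeout 15000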
+
+# Cluster node timeout is the number of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to failover
+# at all.
+#
+# The point "2" can be tuned by the user. Specifically a replica will not
+# perform the failover if, since the last interaction with the master, the
+# time elapsed is greater than:
+#
+# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that is, masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# Turning off this option allows the use of less automatic cluster
+# configuration. It both disables migration to orphaned masters and
+# migration from masters that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least one hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are
+# covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to failover
+# their master during master failures. However the replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage to
+# the entire cluster without this option set; with it set, there is only a
+# write outage. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to set up your cluster make sure to read the documentation
+# available at the https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes' address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this purpose:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user, who can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+# (Except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" directive takes as argument a string that is
+# composed of zero or multiple characters. The empty string means that
+# notifications are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
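+
+# As an illustrative session (assuming a local instance on the default
+# port), enable all notification classes and watch events for database 0
+# from a second client:
+#
+# redis-cli config set notify-keyspace-events KEA
+# redis-cli psubscribe '__key*@0__:*'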
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement
+# this support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler
+# internet, others believe that the mainstream internet became too
+# controlled, and it's cool to create an alternative space for people that
+# want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were illegal anyway: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# jargon), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However other commands cannot be called before the client
+# authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down PFADD too much,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
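+
+# These special encodings can be observed directly (an illustrative session;
+# behavior as of Redis 6.x):
+#
+# redis-cli sadd myset 1 2 3
+# redis-cli object encoding myset # -> "intset" while members are integers
+# redis-cli sadd myset not-a-number
+# redis-cli object encoding myset # -> "hashtable" once that no longer holds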
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
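+
+# For instance, to keep only a 512mb hard limit for replicas and disable the
+# soft limit entirely (an illustrative override, not a default of this file):
+#
+# client-output-buffer-limit replica 512mb 0 0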
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to prevent a protocol desynchronization (for
+# instance due to a bug in the client) from leading to unbounded memory usage
+# in the query buffer. However you can configure it here if you have very
+# special needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here, but it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful, for instance, to avoid
+# processing too many clients for each background task invocation, which
+# would cause latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When Redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve performance and how the keys' LFU changes over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
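+#
+# As a back-of-the-envelope example: with the default lfu-log-factor of 10,
+# a key whose counter already sits at 100 is incremented with probability
+# 1/(100*10+1), i.e. roughly once per thousand hits.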
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
+# +--------+------------+------------+------------+------------+------------+
+# | 0 | 104 | 255 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 1 | 18 | 49 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 10 | 10 | 18 | 142 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 100 | 8 | 11 | 49 | 143 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing memory to be reclaimed.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However, thanks to this feature
+# implemented by Oran Agra for Redis 4.0, this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys,
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean, it is
+# a good idea to leave the defaults untouched.
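+
+# As a sketch, defragmentation can be enabled and observed at runtime on an
+# instance built against the bundled Jemalloc:
+#
+# redis-cli config set activedefrag yes
+# redis-cli info memory # watch the mem_fragmentation_ratio field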
If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. + +# Enabled active defragmentation +# activedefrag no + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 + +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG diff --git a/playbooks/roles/registrar/defaults/main.yml b/playbooks/roles/registrar/defaults/main.yml new file mode 100644 index 00000000000..342c93ea9d8 --- /dev/null +++ b/playbooks/roles/registrar/defaults/main.yml @@ -0,0 +1,161 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role registrar +# + +REGISTRAR_ENABLED: True + +# +# vars are namespace with the module name. 
+# +registrar_service_name: 'registrar' + +registrar_user: "{{ registrar_service_name }}" +registrar_home: "{{ COMMON_APP_DIR }}/{{ registrar_service_name }}" +registrar_app_dir: "{{ COMMON_APP_DIR }}/{{ registrar_service_name }}" +registrar_code_dir: "{{ registrar_app_dir }}/{{ registrar_service_name }}" +registrar_venvs_dir: "{{ registrar_app_dir }}/venvs" +registrar_venv_dir: "{{ registrar_venvs_dir }}/registrar" +registrar_celery_default_queue: 'registrar.default' + +REGISTRAR_USE_PYTHON38: True + +REGISTRAR_CELERY_ALWAYS_EAGER: false +REGISTRAR_CELERY_BROKER_TRANSPORT: 'redis' +REGISTRAR_CELERY_BROKER_USER: '' +REGISTRAR_CELERY_BROKER_PASSWORD: '' +REGISTRAR_CELERY_BROKER_HOSTNAME: '' +REGISTRAR_CELERY_BROKER_VHOST: '' + +registrar_environment: + REGISTRAR_CFG: '{{ COMMON_CFG_DIR }}/{{ registrar_service_name }}.yml' + +registrar_gunicorn_port: 8734 + +registrar_debian_pkgs: [] + +REGISTRAR_NGINX_PORT: '1{{ registrar_gunicorn_port }}' +REGISTRAR_SSL_NGINX_PORT: '4{{ registrar_gunicorn_port }}' + +REGISTRAR_DEFAULT_DB_NAME: 'registrar' +REGISTRAR_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +REGISTRAR_MYSQL_USER: 'registrar001' +REGISTRAR_MYSQL_PASSWORD: 'password' +REGISTRAR_MYSQL_CONN_MAX_AGE: 60 + +REGISTRAR_MEMCACHE: [ 'memcache' ] + +REGISTRAR_DJANGO_SETTINGS_MODULE: 'registrar.settings.production' +REGISTRAR_DOMAIN: 'localhost' +REGISTRAR_URL_ROOT: 'http://{{ REGISTRAR_DOMAIN }}:{{ REGISTRAR_NGINX_PORT }}' +REGISTRAR_API_ROOT: '{{ REGISTRAR_URL_ROOT }}/api' +REGISTRAR_LOGOUT_URL: '{{ REGISTRAR_URL_ROOT }}/logout/' + +REGISTRAR_LANG: 'en_US.UTF-8' +REGISTRAR_LANGUAGE_CODE: 'en' +REGISTRAR_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +REGISTRAR_SERVICE_USER: 'registrar_service_user' + +REGISTRAR_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ registrar_service_name }}' +REGISTRAR_MEDIA_ROOT: '{{ REGISTRAR_DATA_DIR }}/media' +REGISTRAR_MEDIA_URL: '/api/media/' + +REGISTRAR_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ REGISTRAR_MEDIA_ROOT }}' + MEDIA_URL: '{{ REGISTRAR_MEDIA_URL }}' + +# TODO: Let edx_django_service manage REGISTRAR_STATIC_ROOT in phase 2. 
+REGISTRAR_STATIC_ROOT: '{{ REGISTRAR_DATA_DIR }}/staticfiles' +REGISTRAR_STATIC_URL: '/static/' + +REGISTRAR_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +REGISTRAR_CORS_ORIGIN_ALLOW_ALL: false +REGISTRAR_CORS_ORIGIN_WHITELIST: [] + +REGISTRAR_CSRF_COOKIE_SECURE: false +REGISTRAR_CSRF_TRUSTED_ORIGINS: [] + +REGISTRAR_VERSION: 'master' + +REGISTRAR_GUNICORN_EXTRA: '' + +REGISTRAR_EXTRA_APPS: [] + +REGISTRAR_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +REGISTRAR_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +# Used to automatically configure OAuth2 Client +REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'registrar-sso-key' +REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'registrar-sso-secret' +REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'registrar-backend-service-key' +REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'registrar-backend-service-secret' +REGISTRAR_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +# API key for segment.io +REGISTRAR_SEGMENT_KEY: !!null + +REGISTRAR_DISCOVERY_BASE_URL: !!null +REGISTRAR_LMS_BASE_URL: !!null + +registrar_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ REGISTRAR_CERTIFICATE_LANGUAGES }}' + REGISTRAR_SERVICE_USER: '{{ REGISTRAR_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ REGISTRAR_LANGUAGE_COOKIE_NAME }}' + SEGMENT_KEY: "{{ REGISTRAR_SEGMENT_KEY }}" + DISCOVERY_BASE_URL: "{{ REGISTRAR_DISCOVERY_BASE_URL }}" + LMS_BASE_URL: "{{ REGISTRAR_LMS_BASE_URL }}" + CORS_ORIGIN_WHITELIST: "{{ REGISTRAR_CORS_ORIGIN_WHITELIST }}" + CSRF_TRUSTED_ORIGINS: "{{ REGISTRAR_CSRF_TRUSTED_ORIGINS }}" + CSRF_COOKIE_SECURE: "{{ REGISTRAR_CSRF_COOKIE_SECURE }}" + CELERY_TASK_ALWAYS_EAGER: '{{ REGISTRAR_CELERY_ALWAYS_EAGER }}' + CELERY_BROKER_TRANSPORT: '{{ REGISTRAR_CELERY_BROKER_TRANSPORT }}' + CELERY_BROKER_USER: '{{ REGISTRAR_CELERY_BROKER_USER }}' + CELERY_BROKER_PASSWORD: '{{ REGISTRAR_CELERY_BROKER_PASSWORD }}' + CELERY_BROKER_HOSTNAME: '{{ REGISTRAR_CELERY_BROKER_HOSTNAME }}' + CELERY_BROKER_VHOST: '{{ REGISTRAR_CELERY_BROKER_VHOST }}' + CELERY_TASK_DEFAULT_EXCHANGE: 'registrar' + CELERY_TASK_DEFAULT_ROUTING_KEY: 'registrar' + CELERY_TASK_DEFAULT_QUEUE: '{{ registrar_celery_default_queue }}' + +# See edx_django_service_automated_users for an example of what this should be +REGISTRAR_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. 
for sandboxes) + +REGISTRAR_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +REGISTRAR_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +REGISTRAR_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +REGISTRAR_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +REGISTRAR_ENABLE_ADMIN_URLS_RESTRICTION: false + +REGISTRAR_ADMIN_URLS: + - admin + +# Worker settings +worker_django_settings_module: "{{ REGISTRAR_DJANGO_SETTINGS_MODULE }}" +REGISTRAR_CELERY_WORKERS: + - queue: '{{ registrar_celery_default_queue }}' + concurrency: 1 + monitor: True +registrar_workers: "{{ REGISTRAR_CELERY_WORKERS }}" + +registrar_post_migrate_commands: [] diff --git a/playbooks/roles/registrar/meta/main.yml b/playbooks/roles/registrar/meta/main.yml new file mode 100644 index 00000000000..362772f2627 --- /dev/null +++ b/playbooks/roles/registrar/meta/main.yml @@ -0,0 +1,56 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role registrar +# +dependencies: + - role: edx_django_service + edx_django_service_use_python38: '{{ REGISTRAR_USE_PYTHON38 }}' + edx_django_service_version: '{{ REGISTRAR_VERSION }}' + edx_django_service_name: '{{ registrar_service_name }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ registrar_service_name }}' + edx_django_service_user: '{{ registrar_user }}' + edx_django_service_config_overrides: '{{ registrar_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ registrar_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ registrar_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ REGISTRAR_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ registrar_environment }}' + edx_django_service_gunicorn_extra: '{{ REGISTRAR_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ REGISTRAR_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ REGISTRAR_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ REGISTRAR_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ REGISTRAR_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ REGISTRAR_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ REGISTRAR_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ REGISTRAR_MEMCACHE }}' + edx_django_service_default_db_host: '{{ REGISTRAR_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ REGISTRAR_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ REGISTRAR_MYSQL_USER }}' + edx_django_service_db_password: '{{ REGISTRAR_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ REGISTRAR_MYSQL_CONN_MAX_AGE }}' + edx_django_service_extra_apps: '{{ REGISTRAR_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ REGISTRAR_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ 
REGISTRAR_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ REGISTRAR_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ registrar_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ REGISTRAR_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_api_root: '{{ REGISTRAR_API_ROOT }}' + edx_django_service_decrypt_config_enabled: '{{ REGISTRAR_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ REGISTRAR_COPY_CONFIG_ENABLED }}' + edx_django_service_migration_check_services: '{{ registrar_service_name }},{{ registrar_service_name }}-workers' + edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ registrar_workers }}' + EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ REGISTRAR_ENABLE_ADMIN_URLS_RESTRICTION }}' + EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ REGISTRAR_ADMIN_URLS }}' diff --git a/playbooks/roles/registrar/tasks/main.yml b/playbooks/roles/registrar/tasks/main.yml new file mode 100644 index 00000000000..bc07f5d8715 --- /dev/null +++ b/playbooks/roles/registrar/tasks/main.yml @@ -0,0 +1,23 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role registrar +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# + diff --git a/playbooks/roles/s3fs/defaults/main.yml b/playbooks/roles/s3fs/defaults/main.yml index 2fb1c3f19a1..a38f98e5d79 100644 --- a/playbooks/roles/s3fs/defaults/main.yml +++ b/playbooks/roles/s3fs/defaults/main.yml @@ -2,18 +2,18 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # ## # Vars for role s3fs # -s3fs_version: 's3fs-1.73' +S3FS_VERSION: 's3fs-1.73' s3fs_download_src: 'http://s3fs.googlecode.com/files/' -s3fs_archive: '{{ s3fs_version }}.tar.gz' +s3fs_archive: '{{ S3FS_VERSION }}.tar.gz' s3fs_download_url: '{{ s3fs_download_src }}/{{ s3fs_archive }}' s3fs_temp_dir: '/var/tmp' diff --git a/playbooks/roles/s3fs/tasks/main.yml b/playbooks/roles/s3fs/tasks/main.yml index 819d303ebba..325d4947a78 100644 --- a/playbooks/roles/s3fs/tasks/main.yml +++ b/playbooks/roles/s3fs/tasks/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # Tasks for role s3fs # @@ -29,13 +29,13 
@@
# file:
# path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory"
-# with_items: my_role_s3fs_mounts
+# with_items: "{{ my_role_s3fs_mounts }}"
#
# - name: mount s3 buckets
# mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
-# with_items: myrole_s3fs_mounts
+# with_items: "{{ myrole_s3fs_mounts }}"
#
# Example play:
#
@@ -69,22 +69,22 @@
shell: /bin/tar -xzf {{ s3fs_archive }}
chdir={{ s3fs_temp_dir }}
- creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure
+ creates={{ s3fs_temp_dir }}/{{ S3FS_VERSION }}/configure
- name: configure
shell: ./configure
- chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
- creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status
+ chdir={{ s3fs_temp_dir }}/{{ S3FS_VERSION }}
+ creates={{ s3fs_temp_dir }}/{{ S3FS_VERSION }}/config.status
- name: make
shell: /usr/bin/make
- chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
- creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3cmd
+ chdir={{ s3fs_temp_dir }}/{{ S3FS_VERSION }}
+ creates={{ s3fs_temp_dir }}/{{ S3FS_VERSION }}/src/s3cmd
- name: make install
shell: /usr/bin/make install
- chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
+ chdir={{ s3fs_temp_dir }}/{{ S3FS_VERSION }}
diff --git a/playbooks/roles/security/defaults/main.yml b/playbooks/roles/security/defaults/main.yml
new file mode 100644
index 00000000000..03dba03a365
--- /dev/null
+++ b/playbooks/roles/security/defaults/main.yml
@@ -0,0 +1,43 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Defaults for role security
+#
+
+#
+# vars are namespaced with the module name.
+#
+security_role_name: security
+# set to true to enable unattended upgrades nightly
+SECURITY_UNATTENDED_UPGRADES: false
+# set to true to upgrade all packages nightly. false will only upgrade from security repo.
+SECURITY_UPDATE_ALL_PACKAGES: false
+# set to true to run aptitude safe-upgrade whenever ansible is run
+SAFE_UPGRADE_ON_ANSIBLE: false
+# set to true to run unattended-upgrade during ansible runs. This is expected to only install security updates.
+SECURITY_UPGRADE_ON_ANSIBLE: false
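+
+# As an illustrative override (for example in group_vars, for an environment
+# that should patch itself nightly; the flag names are the ones defined
+# above):
+#
+# SECURITY_UNATTENDED_UPGRADES: true
+# SECURITY_UPDATE_ALL_PACKAGES: false
+# SAFE_UPGRADE_ON_ANSIBLE: true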
+
+
+#
+# OS packages
+#
+
+security_debian_pkgs:
+  - aptitude
+  - unattended-upgrades
+  - gcc
+
+security_redhat_pkgs:
+  - yum-plugin-security
+  - yum-cron
+
+
+SECURITY_DEBIAN_PKGS_BLACKLIST: []
+
diff --git a/playbooks/roles/security/files/tmp/GHOST.c b/playbooks/roles/security/files/tmp/GHOST.c
new file mode 100644
index 00000000000..189515abfc9
--- /dev/null
+++ b/playbooks/roles/security/files/tmp/GHOST.c
@@ -0,0 +1,44 @@
+/*
+ * GHOST vulnerability check
+ * http://www.openwall.com/lists/oss-security/2015/01/27/9
+ * Usage: gcc GHOST.c -o GHOST && ./GHOST
+ */
+
+#include <netdb.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#define CANARY "in_the_coal_mine"
+
+struct {
+  char buffer[1024];
+  char canary[sizeof(CANARY)];
+} temp = { "buffer", CANARY };
+
+int main(void) {
+  struct hostent resbuf;
+  struct hostent *result;
+  int herrno;
+  int retval;
+
+  /*** strlen (name) = size_needed - sizeof (*host_addr) - sizeof (*h_addr_ptrs) - 1; ***/
+  size_t len = sizeof(temp.buffer) - 16*sizeof(unsigned char) - 2*sizeof(char *) - 1;
+  char name[sizeof(temp.buffer)];
+  memset(name, '0', len);
+  name[len] = '\0';
+
+  retval = gethostbyname_r(name, &resbuf, temp.buffer, sizeof(temp.buffer), &result, &herrno);
+
+  if (strcmp(temp.canary, CANARY) != 0) {
+    puts("vulnerable");
+    exit(EXIT_SUCCESS);
+  }
+  if (retval == ERANGE) {
+    puts("OK");
+    exit(EXIT_SUCCESS);
+  }
+  puts("should not happen");
+  exit(EXIT_FAILURE);
+}
diff --git a/playbooks/roles/security/tasks/main.yml b/playbooks/roles/security/tasks/main.yml
new file mode 100644
index 00000000000..eefa8213ea1
--- /dev/null
+++ b/playbooks/roles/security/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role security
+#
+# Overview:
+#
+#
+# Dependencies:
+#
+#
+# Example play:
+#
+#
+
+- include: security-ubuntu.yml
+  when: ansible_distribution == 'Ubuntu'
+
+- include: security-amazon.yml
+  when: ansible_distribution == 'Amazon'
+
diff --git a/playbooks/roles/security/tasks/security-amazon.yml b/playbooks/roles/security/tasks/security-amazon.yml
new file mode 100644
index 00000000000..513c9b4f343
--- /dev/null
+++ b/playbooks/roles/security/tasks/security-amazon.yml
@@ -0,0 +1,38 @@
+---
+#### Enable periodic security updates
+- name: Install security packages
+  yum:
+    name: "{{ security_redhat_pkgs }}"
+    state: latest
+    update_cache: yes
+
+- name: Enable automatic start for update service
+  service:
+    name: yum-cron
+    enabled: yes
+    state: started
+
+- name: Update all system packages
+  yum:
+    name: '*'
+    state: latest
+  when: SAFE_UPGRADE_ON_ANSIBLE
+
+- name: Configure security auto-updates
+  lineinfile:
+    dest: /etc/yum/yum-cron.conf
+    regexp: "{{ item.regexp }}"
+    line: "{{ item.line }}"
+  with_items:
+    - { regexp: '^update_cmd', line: 'update_cmd = security' }
+    - { regexp: '^apply_updates', line: 'apply_updates = yes' }
+
+- name: "Take security updates during ansible runs"
+  command: "{{ item }}"
+  when: SECURITY_UPGRADE_ON_ANSIBLE
+  register: result_amazon
+  retries: 3
+  until: result_amazon is succeeded
+  with_items:
+    - yum check-update --security
+    - yum update --security -y
diff --git a/playbooks/roles/security/tasks/security-ubuntu.yml b/playbooks/roles/security/tasks/security-ubuntu.yml
new file mode 100644
index 00000000000..d2b9a69eb7d
--- /dev/null
+++ b/playbooks/roles/security/tasks/security-ubuntu.yml
@@ -0,0 +1,74 @@
+---
+#### Enable periodic security updates
+- name: Install security packages
+  apt:
+    name: "{{ security_debian_pkgs }}"
+    state: latest
+    update_cache: yes
+
+
+- name: Update all system packages
+  apt:
+    upgrade: safe
+  when: SAFE_UPGRADE_ON_ANSIBLE
+
+- name: Configure periodic unattended-upgrades
+  template:
+    src: "etc/apt/apt.conf.d/10periodic"
+    dest: "/etc/apt/apt.conf.d/10periodic"
+    owner: root
+    group: root
+    mode: "0644"
+  when: SECURITY_UNATTENDED_UPGRADES
+
+- name: Disable unattended-upgrades if Xenial (16.04)
+  command: "{{ item }}"
+  when: ansible_distribution_release == 'xenial' and not SECURITY_UNATTENDED_UPGRADES
+  register: result_ubuntu
+  retries: 3
+  until: result_ubuntu is succeeded
+  with_items:
+    - "systemctl disable apt-daily.service"
+    - "systemctl disable apt-daily.timer"
+    - "systemctl disable apt-daily-upgrade.timer"
+  ignore_errors: true
+
+- name: Disable unattended-upgrades
+  file:
+    path: "/etc/apt/apt.conf.d/10periodic"
+    state: absent
+  when: not SECURITY_UNATTENDED_UPGRADES
+
+- name: Only unattended-upgrade from security repo
+  template:
+    src: "etc/apt/apt.conf.d/20unattended-upgrade"
+    dest: "/etc/apt/apt.conf.d/20unattended-upgrade"
+    owner: root
+    group: root
+    mode: "0644"
+  when: SECURITY_UNATTENDED_UPGRADES and not SECURITY_UPDATE_ALL_PACKAGES
+
+- name: Add debian blacklist
+  template:
+    src: "etc/apt/apt.conf.d/50unattended-upgrades"
+    dest: "/etc/apt/apt.conf.d/50unattended-upgrades"
+    owner: root
+    group: root
+    mode: "0644"
+
+- name: Disable security only updates on unattended-upgrades
+  file:
+    path: "/etc/apt/apt.conf.d/20unattended-upgrade"
+    state: absent
+  when: SECURITY_UPDATE_ALL_PACKAGES or not SECURITY_UNATTENDED_UPGRADES
+
+# We dry-run because unattended-upgrade is quiet, and only has -d (debug), not -v (verbose)
+- name: "Take security updates during ansible runs"
+  command: "{{ item }}"
+  when: SECURITY_UPGRADE_ON_ANSIBLE
+  with_items:
+    - unattended-upgrade --dry-run
+    - unattended-upgrade
+  register: ubuntu_security
+  retries: 10
+  until: ubuntu_security is succeeded
diff --git a/playbooks/roles/security/templates/etc/apt/apt.conf.d/10periodic b/playbooks/roles/security/templates/etc/apt/apt.conf.d/10periodic
new file mode 100644
index 00000000000..20c4b2949dd
--- /dev/null
+++ b/playbooks/roles/security/templates/etc/apt/apt.conf.d/10periodic
@@ -0,0 +1,5 @@
+APT::Periodic::Enable "1";
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Download-Upgradeable-Packages "1";
+APT::Periodic::AutocleanInterval "7";
+APT::Periodic::Unattended-Upgrade "1";
diff --git a/playbooks/roles/security/templates/etc/apt/apt.conf.d/20unattended-upgrade b/playbooks/roles/security/templates/etc/apt/apt.conf.d/20unattended-upgrade
new file mode 100644
index 00000000000..6dc92f3b131
--- /dev/null
+++ b/playbooks/roles/security/templates/etc/apt/apt.conf.d/20unattended-upgrade
@@ -0,0 +1,4 @@
+
+Unattended-Upgrade::Allowed-Origins {
+    "${distro_id} ${distro_codename}-security";
+};
diff --git a/playbooks/roles/security/templates/etc/apt/apt.conf.d/50unattended-upgrades b/playbooks/roles/security/templates/etc/apt/apt.conf.d/50unattended-upgrades
new file mode 100644
index 00000000000..a099aa23619
--- /dev/null
+++ b/playbooks/roles/security/templates/etc/apt/apt.conf.d/50unattended-upgrades
@@ -0,0 +1,5 @@
+Unattended-Upgrade::Package-Blacklist {
+  {% for blacklisted_item in SECURITY_DEBIAN_PKGS_BLACKLIST %}
%} + {{ blacklisted_item }}; + {% endfor %} +} \ No newline at end of file diff --git a/playbooks/roles/server_utils/defaults/main.yml b/playbooks/roles/server_utils/defaults/main.yml new file mode 100644 index 00000000000..6e0011059d3 --- /dev/null +++ b/playbooks/roles/server_utils/defaults/main.yml @@ -0,0 +1,44 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role server_utils +# + +# +# vars are namespaced with the module name. +# +server_utils_role_name: server_utils + +# +# OS packages +# + +server_utils_debian_pkgs: + # not sure why this is installed + - ack-grep + # not sure why this is installed + - mosh + # not sure why this is installed + # Not installed by default on vagrant ubuntu + # boxes. + # TODO: move to Vagrant role + - tree + - screen + - tmux + - curl + - vim + - dnsutils + - inetutils-telnet + - netcat + +server_utils_redhat_pkgs: [] + +SERVER_UTILS_EDX_PPA_KEY_ID: "69464050" +SERVER_UTILS_EDX_PPA_KEY_SERVER: "keyserver.ubuntu.com" diff --git a/playbooks/roles/server_utils/meta/main.yml b/playbooks/roles/server_utils/meta/main.yml new file mode 100644 index 00000000000..68067364fc2 --- /dev/null +++ b/playbooks/roles/server_utils/meta/main.yml @@ -0,0 +1,14 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role server_utils +# + + diff --git a/playbooks/roles/server_utils/tasks/main.yml b/playbooks/roles/server_utils/tasks/main.yml new file mode 100644 index 00000000000..0f9e3d38b6c --- /dev/null +++ b/playbooks/roles/server_utils/tasks/main.yml @@ -0,0 +1,39 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role server_utils +# +# Overview: +# +# Install useful interactive utilities for triage and debugging purposes. +# Typically these would not need to be available on servers, as shell access +# is unnecessary.
+# +# Example play: +# +# + +- name: Check for expired edx key + # The pipe requires the shell module; the command module would pass "|" to apt-key verbatim + shell: "apt-key list | grep {{ SERVER_UTILS_EDX_PPA_KEY_ID }}" + register: ppa_key_status + when: ansible_distribution in common_debian_variants + +- name: Refresh expired edx key + command: "apt-key adv --keyserver {{ SERVER_UTILS_EDX_PPA_KEY_SERVER }} --recv-keys {{ SERVER_UTILS_EDX_PPA_KEY_ID }}" + become: yes + when: ansible_distribution in common_debian_variants and 'expired' in ppa_key_status.stdout + +- name: Install Ubuntu system packages + apt: + name: "{{ server_utils_debian_pkgs }}" + install_recommends: yes + state: present + update_cache: yes + when: ansible_distribution in common_debian_variants diff --git a/playbooks/roles/shibboleth/defaults/main.yml b/playbooks/roles/shibboleth/defaults/main.yml index 79f225c411b..b83a6cdcd9d 100644 --- a/playbooks/roles/shibboleth/defaults/main.yml +++ b/playbooks/roles/shibboleth/defaults/main.yml @@ -8,3 +8,6 @@ shib: YOU NEED TO GENERATE A REAL KEY HERE USING OPENSSL sp_pem: | THE CORRESPONDING CERTIFICATE PEM GOES HERE +shib_template_dir: '.' +shib_metadata_backup_url: "https://idp.stanford.edu/Stanford-metadata.xml" +shib_download_metadata: true diff --git a/playbooks/roles/shibboleth/meta/main.yml b/playbooks/roles/shibboleth/meta/main.yml new file mode 100644 index 00000000000..e6d66a5175b --- /dev/null +++ b/playbooks/roles/shibboleth/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - apache diff --git a/playbooks/roles/shibboleth/tasks/main.yml b/playbooks/roles/shibboleth/tasks/main.yml index d3aee0711e3..7390775dc81 100644 --- a/playbooks/roles/shibboleth/tasks/main.yml +++ b/playbooks/roles/shibboleth/tasks/main.yml @@ -2,7 +2,7 @@ --- - name: Installs shib and dependencies from apt - apt: pkg={{item}} install_recommends=no state=present update_cache=yes + apt: pkg={{ item }} install_recommends=no state=present update_cache=yes with_items: - shibboleth-sp2-schemas - libshibsp-dev @@ -10,46 +10,34 @@ - libapache2-mod-shib2 - opensaml2-tools notify: restart shibd - tags: - - shib - - install - name: Creates /etc/shibboleth/metadata directory file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd - tags: - - shib - - install - name: Downloads metadata into metadata directory as backup - get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd - tags: - - shib - - install + get_url: + url: "{{ shib_metadata_backup_url }}" + dest: "/etc/shibboleth/metadata/idp-metadata.xml" + mode: 0640 + group: _shibd + owner: _shibd + when: shib_download_metadata - name: writes out key and pem file - template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600 + template: src=sp.{{ item }}.j2 dest=/etc/shibboleth/sp.{{ item }} group=_shibd owner=_shibd mode=0600 with_items: - key - pem notify: restart shibd - tags: - - shib - - install - name: writes out configuration files - template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644 + template: src={{ shib_template_dir }}/{{ item }}.j2 dest=/etc/shibboleth/{{ item }} group=_shibd owner=_shibd mode=0644 with_items: - attribute-map.xml - shibboleth2.xml notify: restart shibd - tags: - - shib - - install - name: enables shib command: a2enmod shib2 notify: restart shibd - tags: - - shib - - install diff --git a/playbooks/roles/simple_theme/README.rst b/playbooks/roles/simple_theme/README.rst new file mode 100644 index 00000000000..280a4150143 ---
/dev/null +++ b/playbooks/roles/simple_theme/README.rst @@ -0,0 +1,65 @@ +Simple theme +############ + +This role allows you to deploy a basic theme at deploy time. The theme can be +customized via Ansible variables in the following ways: +- to redefine SASS variables (like colors) +- to include some static files provided in a local directory (e.g. logo) +- to download some static files from URLs (e.g. logo, favicon) +- in addition, the theme can be based on an existing theme from a repository + +This role will be included by edxapp. The main use case involves deploying a +theme as part of deploying an instance. The new theme will be enabled when +the instance starts. + +Configuration +************* +- The theme name for the deployed theme will be the one specified in EDXAPP_DEFAULT_SITE_THEME +- The theme will be deployed to a directory of that name. + +You have the option to use a skeleton theme. This is the base theme that will be +copied to the target machine, and modified afterwards via the customizations +applied by this role's variables. + +Example: if you have a theme in https://github.com/open-craft/edx-theme/tree/harvard-dcex: +- Set EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO: "https://github.com/open-craft/edx-theme/" +- and EDXAPP_COMPREHENSIVE_THEME_VERSION: "harvard-dcex" + +If you don't use a skeleton theme, the deployed theme will just contain the SASS +variable definitions you provide through the other variables, and the static files +you provide. For simple changes like colors+logo+image this will be enough. + +Static files (like logo and favicon) will be added from the following sources and in +the following order: +- If neither a skeleton theme nor static files are provided, the theme will have no static files +- If a skeleton theme was provided, its static files will be used +- Local files from SIMPLETHEME_STATIC_FILES_DIR will be copied, replacing previous ones +- Files from SIMPLETHEME_STATIC_FILES_URLS will be downloaded, replacing previous ones + +Testing +******* + +The intended use of this role is to be run as part of deploy, not after it. + +There are other cases in which you may want to run the role independently (after the instance is running): +- When testing this role. +- If you plan to use it to deploy theme changes. Be aware that this will overwrite the old theme. + +You can use ansible-playbook to test this role independently. +It requires you to pass more variables manually because they're not available +except when running inside the "edxapp" role. For instance, you might need to pass edxapp_user (e.g. "vagrant" if you test inside devstack).
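+For reference, a minimal set of variable overrides for such a run might look
+like this (a sketch only; the theme name and color values are illustrative,
+not defaults shipped with the role)::
+
+    EDXAPP_ENABLE_COMPREHENSIVE_THEMING: true
+    EDXAPP_COMPREHENSIVE_THEME_DIRS: ["/edx/var/edxapp/themes"]
+    EDXAPP_DEFAULT_SITE_THEME: "mytheme"
+    SIMPLETHEME_ENABLE_DEPLOY: true
+    SIMPLETHEME_SASS_OVERRIDES:
+      - variable: link-color
+        value: '#00b0f0'
+    SIMPLETHEME_EXTRA_SASS: ".wrapper-footer { background: #7ec832 }"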
+ +Example script to test this role, to be run from devstack as the "vagrant" user: +- export PYTHONUNBUFFERED=1 +- source /edx/app/edx_ansible/venvs/edx_ansible/bin/activate +- cd /edx/app/edx_ansible/edx_ansible/playbooks +- ansible-playbook -i localhost, -c local run_role.yml -e role=simple_theme -e CONFIGURATION_VERSION=master -e EDX_PLATFORM_VERSION=master -e EDXAPP_DEFAULT_SITE_THEME=mytheme2 -e '{"SIMPLETHEME_SASS_OVERRIDES": [{"variable": "link-color", "value":"#00b0f0"}, {"variable": "action-primary-bg", "value":"#ff8000"}, {"variable": "action-secondary-bg", "value":"#ff8000"}, {"variable": "theme-colors", "value":"(\"primary\": #ff8000, \"secondary\": #ff8000)"}, {"variable": "button-color", "value":"#ff8000"}], "SIMPLETHEME_EXTRA_SASS": ".global-header { background: #7ec832 } \n .wrapper-footer { background: #7ec832 }"}' -e EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO="https://github.com/open-craft/edx-theme/" -e EDXAPP_COMPREHENSIVE_THEME_VERSION="harvard-dcex" -e edxapp_user=vagrant -e common_web_group=www-data -e SIMPLETHEME_ENABLE_DEPLOY=true -e '{"SIMPLETHEME_STATIC_FILES_URLS": [{"url": "http://docs.ansible.com/ansible/latest/_static/images/logo_invert.png", "dest":"lms/static/images/logo.png"}, {"url": "http://docs.ansible.com/favicon.ico", "dest":"lms/static/images/favicon.ico"}]}' -e '{"EDXAPP_COMPREHENSIVE_THEME_DIRS":["/edx/var/edxapp/themes"], "EDXAPP_ENABLE_COMPREHENSIVE_THEMING": true}' + +Or, if you want to test the task as part of the deployment, change to role=edxapp, +and add --tags some-custom-tag-that-you-should-add-to-the-task + +Note that the header and footer background colors need to be overridden using the SIMPLETHEME_EXTRA_SASS variable. Previously those colors were defined as the SASS variables `$header-bg` and `$footer-bg`; since Hawthorn they are defined using Bootstrap's theming mechanism. + diff --git a/playbooks/roles/simple_theme/defaults/main.yml b/playbooks/roles/simple_theme/defaults/main.yml new file mode 100644 index 00000000000..ac152d8ea30 --- /dev/null +++ b/playbooks/roles/simple_theme/defaults/main.yml @@ -0,0 +1,142 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# Simple theme. Creates a basic theme at deploy time. +# +# See documentation in README.rst + +# +# This file contains the variables you'll need to pass to the role, and some +# example values. + + + +# Skeleton theme. Check README.rst +# EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO +# EDXAPP_COMPREHENSIVE_THEME_VERSION + + +# Enable/disable deploy +# This flag isn't at the edxapp role because of https://github.com/ansible/ansible/issues/19472 +SIMPLETHEME_ENABLE_DEPLOY: False + + +# This variable holds the main path where the role will copy files. +# EDXAPP_COMPREHENSIVE_THEME_DIRS.0 can be +# "/edx/var/edxapp/themes" or +# "/edx/app/edxapp/themes" or +# "/edx/app/edxapp/themes/edx-platform" +# or any other. +# If you have more than one theme dir, you'll need to override this internal variable +simpletheme_folder: "{{ EDXAPP_COMPREHENSIVE_THEME_DIRS.0 }}/{{ EDXAPP_DEFAULT_SITE_THEME }}" + + +# Define SASS variables +# Apart from giving direct values like '#123456', you may give values that use +# previously defined variables, like '$some-variable', as long as this variable +# is earlier in the list.
+# +# Sample configuration: +# SIMPLETHEME_SASS_OVERRIDES: +# - variable: main-color +# value: '#123456' +# - variable: action-primary-bg +# value: '$main-color' +# - variable: action-primary-fg +# value: '#fff' +# - variable: link-color +# value: 'red' +# - variable: action-secondary-bg +# value: '#07f' +# +SIMPLETHEME_SASS_OVERRIDES: [] + +# Files from the specified directory will be copied to the static/ directory. +# This is mainly designed to include images and JS. +# Expected file structure is e.g. +# - lms +# - images +# - logo.png +# - favicon.ico +# - js +# - myscript.js +# - cms +# - images +# - logo.png +# +# Paths will be transformed like this: +# lms/images/logo.png → lms/static/images/logo.png +# lms/js/myscript.js → lms/static/js/myscript.js +# etc. +# +# Sample: +# SIMPLETHEME_STATIC_FILES_DIR: "{{ role_path }}/files/example_static_dir" +SIMPLETHEME_STATIC_FILES_DIR: "" + + +# These files will be downloaded and included in the static directory after the +# files from SIMPLETHEME_STATIC_FILES_DIR have been copied. +# Local paths must be relative, e.g. "lms/static/images/favicon.ico" +# Example which downloads logo and favicon: +# SIMPLETHEME_STATIC_FILES_URLS: +# - url: http://docs.ansible.com/ansible/latest/_static/images/logo_invert.png +# dest: lms/static/images/logo.png +# - url: http://docs.ansible.com/favicon.ico +# dest: lms/static/images/favicon.ico +SIMPLETHEME_STATIC_FILES_URLS: [] + + +# This fragment will be inserted in _lms-overrides and will affect all pages +# Sample: +# SIMPLETHEME_EXTRA_SASS: | +# .header-global h1.logo a img { +# height: 50px; +# } +# .header-global.slim h2 { +# width: 60% !important; +# } +# .wrapper-footer { +# border-top: 3px solid $main-color; +# } +SIMPLETHEME_EXTRA_SASS: "" + + +# Use this variable to configure django translations. +# Note that edx-platform does not pick up translations from themes by default. +# You will have to manually configure either `COMPREHENSIVE_THEME_LOCALE_PATHS` or +# `PREPEND_LOCALE_PATHS` to include the path to the theme's i18n/locale folder for +# these translations to get picked up. +# +# The SIMPLETHEME_I18N_DJANGO variable should contain a list of dictionaries with these keys: +# - `lang`: the language code +# - `domain`: the i18n domain (typically one of "django" or "djangojs") +# - `headers`: (optional) Additional PO file headers. +# - `messages`: Translation messages. It should be a raw string of PO formatted messages.
+# +# Sample: +# SIMPLETHEME_I18N_DJANGO: +# - lang: en +# domain: django +# headers: | +# "Plural-Forms: nplurals=2; plural=(n > 1);\n" +# messages: | +# msgid "my id" +# msgstr "my translation" +# +# msgid "one" +# msgid_plural "many" +# msgstr[0] "just one" +# msgstr[1] "a lot" +# - lang: en +# domain: djangojs +# messages: | +# msgid "my id" +# msgstr "my JS translation" +SIMPLETHEME_I18N_DJANGO: [] diff --git a/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/discussion/lms-discussion-bootstrap.scss b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/discussion/lms-discussion-bootstrap.scss new file mode 100644 index 00000000000..d4668edb4af --- /dev/null +++ b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/discussion/lms-discussion-bootstrap.scss @@ -0,0 +1,2 @@ +@import 'lms/static/sass/discussion/lms-discussion-bootstrap'; +@import 'lms-overrides'; diff --git a/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/lms-course.scss b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/lms-course.scss new file mode 100644 index 00000000000..eb031002d15 --- /dev/null +++ b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/lms-course.scss @@ -0,0 +1,2 @@ +@import 'lms/static/sass/lms-course'; +@import 'lms-overrides'; diff --git a/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/lms-main-v1.scss b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/lms-main-v1.scss new file mode 100644 index 00000000000..9395b5fc482 --- /dev/null +++ b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/lms-main-v1.scss @@ -0,0 +1,3 @@ +@import 'lms/static/sass/lms-main-v1'; + +@import 'lms-overrides'; diff --git a/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/partials/lms/theme/_variables-v1.scss b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/partials/lms/theme/_variables-v1.scss new file mode 100644 index 00000000000..5bdd4d5aaf3 --- /dev/null +++ b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/partials/lms/theme/_variables-v1.scss @@ -0,0 +1,2 @@ +@import 'common-variables'; +@import 'lms/static/sass/partials/lms/theme/variables-v1'; diff --git a/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/partials/lms/theme/_variables.scss b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/partials/lms/theme/_variables.scss new file mode 100644 index 00000000000..e40724fd7c1 --- /dev/null +++ b/playbooks/roles/simple_theme/files/default_skeleton/lms/static/sass/partials/lms/theme/_variables.scss @@ -0,0 +1,2 @@ +@import 'common-variables'; +@import 'lms/static/sass/partials/lms/theme/variables'; diff --git a/playbooks/roles/simple_theme/files/example_static_dir/cms/images/logo.png b/playbooks/roles/simple_theme/files/example_static_dir/cms/images/logo.png new file mode 100644 index 00000000000..8f092d94522 Binary files /dev/null and b/playbooks/roles/simple_theme/files/example_static_dir/cms/images/logo.png differ diff --git a/playbooks/roles/simple_theme/files/example_static_dir/lms/images/favicon.ico b/playbooks/roles/simple_theme/files/example_static_dir/lms/images/favicon.ico new file
mode 100644 index 00000000000..13801059f16 Binary files /dev/null and b/playbooks/roles/simple_theme/files/example_static_dir/lms/images/favicon.ico differ diff --git a/playbooks/roles/simple_theme/files/example_static_dir/lms/images/logo.png b/playbooks/roles/simple_theme/files/example_static_dir/lms/images/logo.png new file mode 100644 index 00000000000..00d5852d00a Binary files /dev/null and b/playbooks/roles/simple_theme/files/example_static_dir/lms/images/logo.png differ diff --git a/playbooks/roles/simple_theme/files/example_static_dir/lms/js/myscript.js b/playbooks/roles/simple_theme/files/example_static_dir/lms/js/myscript.js new file mode 100644 index 00000000000..f7252b44e7f --- /dev/null +++ b/playbooks/roles/simple_theme/files/example_static_dir/lms/js/myscript.js @@ -0,0 +1 @@ +// sample script deployed by simple_theme diff --git a/playbooks/roles/simple_theme/meta/main.yml b/playbooks/roles/simple_theme/meta/main.yml new file mode 100644 index 00000000000..8ee711cbfcb --- /dev/null +++ b/playbooks/roles/simple_theme/meta/main.yml @@ -0,0 +1,11 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# + +dependencies: [] diff --git a/playbooks/roles/simple_theme/tasks/deploy.yml b/playbooks/roles/simple_theme/tasks/deploy.yml new file mode 100644 index 00000000000..759f3016b6f --- /dev/null +++ b/playbooks/roles/simple_theme/tasks/deploy.yml @@ -0,0 +1,181 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Simple theme +# +# See documentation in README.rst + +# Require comprehensive theming +# EDXAPP_COMPREHENSIVE_THEME_DIRS.0 is usually "/edx/app/edxapp/themes" +- assert: + that: + - "EDXAPP_COMPREHENSIVE_THEME_DIRS | length > 0" + - "EDXAPP_ENABLE_COMPREHENSIVE_THEMING" + msg: "Simple-theme deployment requires comprehensive theming to be enabled" + +- assert: + that: + - "EDXAPP_DEFAULT_SITE_THEME != ''" + msg: "Simple-theme needs to know the name of the deployed theme. Pass it in EDXAPP_DEFAULT_SITE_THEME" + + +- name: Check whether theme directory already exists + stat: path="{{ simpletheme_folder }}" + register: simpletheme_folder_stat + +# Note that if a theme already exists in the destination directory, it won't be +# deleted or redownloaded. It would be better to redownload, but for that we +# need https://github.com/ansible/ansible-modules-core/issues/5292 to be fixed, +# or to implement a workaround. 
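+# (One possible workaround, sketched here only as a comment and not part of
+# this role: remove the existing checkout first so the git task below clones
+# fresh. The "simpletheme_force_redownload" variable is hypothetical.)
+#
+# - name: Remove stale theme checkout before re-downloading
+#   file:
+#     path: "{{ simpletheme_folder }}"
+#     state: absent
+#   when: simpletheme_force_redownload | default(false)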
- block: + - name: Download skeleton theme + git: + repo: "{{ EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO }}" + dest: "{{ simpletheme_folder }}" + version: "{{ EDXAPP_COMPREHENSIVE_THEME_VERSION | default('master') }}" + # force: yes # Disabled due to ansible bug, see above + # Done in a separate step because "git:" doesn't have owner/group parameters + - name: Adjust owner/group of downloaded skeleton theme + file: + dest: "{{ simpletheme_folder }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + recurse: yes + when: EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO != "" and not simpletheme_folder_stat.stat.exists + +# If no skeleton theme, we still need some SASS files to include our SASS partials +- block: + - name: Create default skeleton (dirs) + file: + path: "{{ simpletheme_folder }}/{{ item.path }}" + state: directory + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_filetree: "../files/default_skeleton" + when: item.state == 'directory' + - name: Create default skeleton (files) + copy: + src: "{{ item.src }}" + dest: "{{ simpletheme_folder }}/{{ item.path }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_filetree: "../files/default_skeleton" + when: item.state != 'directory' + when: EDXAPP_COMPREHENSIVE_THEME_SOURCE_REPO == "" and not simpletheme_folder_stat.stat.exists + +# These are directories to hold the compiled templates included in this role +- name: Create directory to hold the theme and styles + file: + path: "{{ simpletheme_folder }}/{{ item }}" + state: directory + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: + - "." + - "lms/static/sass/partials/lms/theme" + +- name: Compile the templates + template: + src: "{{ item }}.j2" + dest: "{{ simpletheme_folder }}/{{ item }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: + # List of files from ./templates to be processed + - "lms/static/sass/common-variables.scss" + - "lms/static/sass/_lms-overrides.scss" + +# Copying static files is done in two steps: create directories + copy files +# (while renaming their path to add "static/"). There could be a 1-step solution, +# e.g. requesting with_filetree with depth 1 (if this is possible in ansible). +# Note: with_fileglob doesn't take directories, but with_filetree does. + +- block: + - name: Create directories for static files to be copied + file: + path: "{{ simpletheme_folder }}/{{ item.path | regex_replace('^([^/]+)/(.+)$','\\1/static/\\2') }}" + state: directory + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_filetree: "{{ SIMPLETHEME_STATIC_FILES_DIR }}" + when: item.state == 'directory' + - name: Copy static files (adding "static/") + copy: + src: "{{ item.src }}" + dest: "{{ simpletheme_folder }}/{{ item.path | regex_replace('^([^/]+)/(.+)$','\\1/static/\\2') }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_filetree: "{{ SIMPLETHEME_STATIC_FILES_DIR }}" + when: item.state != 'directory' + when: SIMPLETHEME_STATIC_FILES_DIR != "" + + +# Downloading remote files is done in two steps: create directories + download each file. +# This step is done after the static files from SIMPLETHEME_STATIC_FILES_DIR have been +# copied; therefore, remote files may overwrite the previously installed static files.
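+# For example (illustrative values only), an entry such as
+#   { url: "https://example.com/logo.png", dest: "lms/static/images/logo.png" }
+# first ensures "{{ simpletheme_folder }}/lms/static/images" exists, then
+# downloads the file to that destination.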
+- block: + - name: Create directories for static files to be downloaded + file: + path: "{{ simpletheme_folder }}/{{ item.dest | dirname }}" + state: directory + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: "{{ SIMPLETHEME_STATIC_FILES_URLS }}" + - name: Download static files to be included in theme + get_url: + url: "{{ item.url }}" + dest: "{{ simpletheme_folder }}/{{ item.dest }}" + force: yes + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: "{{ SIMPLETHEME_STATIC_FILES_URLS }}" + +# Handle translations. +- block: + - name: Install needed packages + apt: + name: gettext + state: present + update_cache: true + cache_valid_time: 3600 + - name: Create directories for django translations + file: + path: "{{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES" + state: directory + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: "{{ SIMPLETHEME_I18N_DJANGO }}" + - name: Make sure .po files exist + file: + path: "{{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.po" + state: touch + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: "{{ SIMPLETHEME_I18N_DJANGO }}" + - name: Create temporary .po files with custom translations + template: + src: "i18n/domain.po.j2" + dest: "{{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.po_" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + with_items: "{{ SIMPLETHEME_I18N_DJANGO }}" + - name: Merge temporary .po files into default translations + shell: > + msgcat --use-first {{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.po_ + {{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.po > + {{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.po + with_items: "{{ SIMPLETHEME_I18N_DJANGO }}" + - name: Compile .po files into .mo + shell: > + msgfmt {{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.po + -o {{ simpletheme_folder }}/i18n/conf/locale/{{ item.lang }}/LC_MESSAGES/{{ item.domain }}.mo + with_items: "{{ SIMPLETHEME_I18N_DJANGO }}" + when: SIMPLETHEME_I18N_DJANGO | length > 0 diff --git a/playbooks/roles/simple_theme/tasks/main.yml b/playbooks/roles/simple_theme/tasks/main.yml new file mode 100644 index 00000000000..81bd82cef3c --- /dev/null +++ b/playbooks/roles/simple_theme/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# +# +# Simple theme +# +# See documentation in README.rst + +# This flag should have been called EDXAPP_SIMPLETHEME_DEPLOY +# but it would be incompatible with include_role because of +# https://github.com/ansible/ansible/issues/19472 +# Therefore we use "include" here (instead of "include_role") +# And because the theme is only needed for the web application, we skip running +# the role for the Celery workers +- include: deploy.yml + when: SIMPLETHEME_ENABLE_DEPLOY and (celery_worker is not defined or not (celery_worker|bool)) + diff --git a/playbooks/roles/simple_theme/templates/i18n/domain.po.j2 b/playbooks/roles/simple_theme/templates/i18n/domain.po.j2 new file mode 100644 index 00000000000..3e08854bee0 --- /dev/null +++ b/playbooks/roles/simple_theme/templates/i18n/domain.po.j2 @@ -0,0 +1,12 @@ +msgid "" +msgstr "" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: {{ item.lang }}\n" +{% if 'headers' in item -%} +{{ item.headers }} +{%- endif 
%} + +{% if 'messages' in item -%} +{{ item.messages }} +{%- endif %} diff --git a/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 b/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 new file mode 100644 index 00000000000..3a3a4905f87 --- /dev/null +++ b/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 @@ -0,0 +1,4 @@ +@import 'common-variables'; +/* Extra SASS as defined by simple_theme starts here: */ +{{ SIMPLETHEME_EXTRA_SASS }} +/* Extra SASS as defined by simple_theme ends here. */ diff --git a/playbooks/roles/simple_theme/templates/lms/static/sass/common-variables.scss.j2 b/playbooks/roles/simple_theme/templates/lms/static/sass/common-variables.scss.j2 new file mode 100644 index 00000000000..1b1c579884c --- /dev/null +++ b/playbooks/roles/simple_theme/templates/lms/static/sass/common-variables.scss.j2 @@ -0,0 +1,5 @@ +/* Variables from simple_theme role start here */ +{% for item in SIMPLETHEME_SASS_OVERRIDES %} +${{ item.variable }}: {{ item.value }}; +{% endfor %} +/* Variables from simple_theme role end here */ diff --git a/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 b/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 new file mode 100644 index 00000000000..5bdd4d5aaf3 --- /dev/null +++ b/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 @@ -0,0 +1,2 @@ +@import 'common-variables'; +@import 'lms/static/sass/partials/lms/theme/variables-v1'; diff --git a/playbooks/roles/snort/defaults/main.yml b/playbooks/roles/snort/defaults/main.yml new file mode 100644 index 00000000000..686f8a2695e --- /dev/null +++ b/playbooks/roles/snort/defaults/main.yml @@ -0,0 +1,5 @@ + +--- +SNORT_OINKCODE: 'oinkcode' +SNORT_RULES_URL: [ 'http://www.snort.org/pub-bin/oinkmaster.cgi/{{ SNORT_OINKCODE }}/snortrules-snapshot-2931.tar.gz', + 'http://rules.emergingthreats.net/open/snort-2.9.0/emerging.rules.tar.gz' ] diff --git a/playbooks/roles/snort/tasks/main.yml b/playbooks/roles/snort/tasks/main.yml new file mode 100644 index 00000000000..3e9f9574264 --- /dev/null +++ b/playbooks/roles/snort/tasks/main.yml @@ -0,0 +1,59 @@ +--- +# install and configure snort IDS +# + +- name: install snort + apt: pkg={{ item }} state="present" + with_items: + - snort + - oinkmaster + +- name: configure snort + template: + src: etc/snort/snort.conf.j2 + dest: /etc/snort/snort.conf + owner: root + group: root + mode: 0644 + +- name: configure snort (debian) + template: + src: etc/snort/snort.debian.conf.j2 + dest: /etc/snort/snort.debian.conf + owner: root + group: root + mode: 0644 + +- name: configure oinkmaster + template: + src: etc/oinkmaster.conf.j2 + dest: /etc/oinkmaster.conf + owner: root + group: root + mode: 0644 + +- name: update snort + shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/ + become: yes + +- name: snort service + service: + name: "snort" + state: "started" + +- name: open read permissions on snort logs + file: + name: "/var/log/snort" + state: "directory" + mode: "755" + +- name: install oinkmaster cronjob + template: + src: etc/cron.daily/oinkmaster.j2 + dest: /etc/cron.daily/oinkmaster + owner: root + group: root + mode: 0755 + + + diff --git a/playbooks/roles/snort/templates/etc/cron.daily/oinkmaster.j2
b/playbooks/roles/snort/templates/etc/cron.daily/oinkmaster.j2 new file mode 100644 index 00000000000..f6ad4bb3cf7 --- /dev/null +++ b/playbooks/roles/snort/templates/etc/cron.daily/oinkmaster.j2 @@ -0,0 +1,4 @@ +#! /bin/bash + +oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/ > /dev/null +service snort restart diff --git a/playbooks/roles/snort/templates/etc/oinkmaster.conf.j2 b/playbooks/roles/snort/templates/etc/oinkmaster.conf.j2 new file mode 100644 index 00000000000..470ab140696 --- /dev/null +++ b/playbooks/roles/snort/templates/etc/oinkmaster.conf.j2 @@ -0,0 +1,25 @@ +{% for url in SNORT_RULES_URL %} +url = {{ url }} +{% endfor %} + +# Ignore local.rules from the rules archive by default since we might +# have put some local rules in our own local.rules and we don't want it +# to get overwritten by the empty one from the archive after each +# update. + +skipfile local.rules + +# The file deleted.rules contains rules that have been deleted from +# other files, so there is usually no point in updating it. +skipfile deleted.rules + + +# Also skip snort.conf by default since we don't want to overwrite our +# own snort.conf if we have it in the same directory as the rules. If +# you have your own production copy of snort.conf in another directory, +# it may be really nice to check for changes in this file though, +# especially since variables are sometimes added or modified and +# new/old files are included/excluded. +skipfile snort.conf + + diff --git a/playbooks/roles/snort/templates/etc/snort/snort.conf.j2 b/playbooks/roles/snort/templates/etc/snort/snort.conf.j2 new file mode 100644 index 00000000000..9bd5732d09b --- /dev/null +++ b/playbooks/roles/snort/templates/etc/snort/snort.conf.j2 @@ -0,0 +1,636 @@ +#-------------------------------------------------- +# VRT Rule Packages Snort.conf +# +# For more information visit us at: +# http://www.snort.org Snort Website +# http://vrt-sourcefire.blogspot.com/ Sourcefire VRT Blog +# +# Mailing list Contact: snort-sigs@lists.sourceforge.net +# False Positive reports: fp@sourcefire.com +# Snort bugs: bugs@snort.org +# +# Compatible with Snort Versions: +# VERSIONS : 2.9.2.0 +# +# Snort build options: +# OPTIONS : --enable-ipv6 --enable-gre --enable-mpls --enable-targetbased --enable-decoder-preprocessor-rules --enable-ppm --enable-perfprofiling --enable-zlib --enable-active-response --enable-normalizer --enable-reload --enable-react --enable-flexresp3 +# +# Additional information: +# This configuration file enables active response, to run snort in +# test mode -T you are required to supply an interface -i +# or test mode will fail to fully validate the configuration and +# exit with a FATAL error +#-------------------------------------------------- + +################################################### +# This file contains a sample snort configuration. +# You should take the following steps to create your own custom configuration: +# +# 1) Set the network variables. +# 2) Configure the decoder +# 3) Configure the base detection engine +# 4) Configure dynamic loaded libraries +# 5) Configure preprocessors +# 6) Configure output plugins +# 7) Customize your rule set +# 8) Customize preprocessor and decoder rule set +# 9) Customize shared object rule set +################################################### + +################################################### +# Step #1: Set the network variables. 
For more information, see README.variables +################################################### + +# Setup the network addresses you are protecting +ipvar HOME_NET any + +# Set up the external network addresses. Leave as "any" in most situations +ipvar EXTERNAL_NET any +#ipvar EXTERNAL_NET !$HOME_NET + +# List of DNS servers on your network +ipvar DNS_SERVERS $HOME_NET + +# List of SMTP servers on your network +ipvar SMTP_SERVERS $HOME_NET + +# List of web servers on your network +ipvar HTTP_SERVERS $HOME_NET + +# List of sql servers on your network +ipvar SQL_SERVERS $HOME_NET + +# List of telnet servers on your network +ipvar TELNET_SERVERS $HOME_NET + +# List of ssh servers on your network +ipvar SSH_SERVERS $HOME_NET + +# List of ftp servers on your network +ipvar FTP_SERVERS $HOME_NET + +# List of sip servers on your network +ipvar SIP_SERVERS $HOME_NET + +# List of ports you run web servers on +portvar HTTP_PORTS [80,8000,18000,18010,18020,18030,18040,18050,18060,18070,18080,18090,18100] + +# List of ports you want to look for SHELLCODE on. +portvar SHELLCODE_PORTS !80 + +# List of ports you might see oracle attacks on +portvar ORACLE_PORTS 1024: + +# List of ports you want to look for SSH connections on: +portvar SSH_PORTS 22 + +# List of ports you run ftp servers on +portvar FTP_PORTS [21,2100,3535] + +# List of ports you run SIP servers on +portvar SIP_PORTS [5060,5061,5600] + +# other variables, these should not be modified +ipvar AIM_SERVERS [64.12.24.0/23,64.12.28.0/23,64.12.161.0/24,64.12.163.0/24,64.12.200.0/24,205.188.3.0/24,205.188.5.0/24,205.188.7.0/24,205.188.9.0/24,205.188.153.0/24,205.188.179.0/24,205.188.248.0/24] + +# Path to your rules files (this can be a relative path) +# Note for Windows users: You are advised to make this an absolute path, +# such as: c:\snort\rules +var RULE_PATH /etc/snort/rules +var SO_RULE_PATH /etc/snort/so_rules +var PREPROC_RULE_PATH /etc/snort/preproc_rules + +################################################### +# Step #2: Configure the decoder. For more information, see README.decode +################################################### + +# Stop generic decode events: +config disable_decode_alerts + +# Stop Alerts on experimental TCP options +config disable_tcpopt_experimental_alerts + +# Stop Alerts on obsolete TCP options +config disable_tcpopt_obsolete_alerts + +# Stop Alerts on T/TCP alerts +config disable_tcpopt_ttcp_alerts + +# Stop Alerts on all other TCPOption type events: +config disable_tcpopt_alerts + +# Stop Alerts on invalid ip options +config disable_ipopt_alerts + +# Alert if value in length field (IP, TCP, UDP) is greater than the length of the packet +# config enable_decode_oversized_alerts + +# Same as above, but drop packet if in Inline mode (requires enable_decode_oversized_alerts) +# config enable_decode_oversized_drops + +# Configure IP / TCP checksum mode +config checksum_mode: all + +# Configure maximum number of flowbit references. For more information, see README.flowbits +# config flowbits_size: 64 + +# Configure ports to ignore +# config ignore_ports: tcp 21 6667:6671 1356 +# config ignore_ports: udp 1:17 53 + +# Configure active response for non inline operation. For more information, see README.active +# config response: eth0 attempts 2 + +# Configure DAQ related options for inline operation.
For more information, see README.daq +# +# config daq: <type> +# config daq_dir: <dir> +# config daq_mode: <mode> +# config daq_var: <var> +# +# <type> ::= pcap | afpacket | dump | nfq | ipq | ipfw +# <mode> ::= read-file | passive | inline +# <var> ::= arbitrary <name>=<value> +# <dir> ::= path as to where to look for DAQ module so's + +# Configure specific UID and GID to run snort as after dropping privs. For more information see snort -h command line options +# +# config set_gid: +# config set_uid: + +# Configure default snaplen. Snort defaults to MTU of in use interface. For more information see README +# +# config snaplen: +# + +# Configure default bpf_file to use for filtering what traffic reaches snort. For more information see snort -h command line options (-F) +# +# config bpf_file: +# + +# Configure default log directory for snort to log to. For more information see snort -h command line options (-l) +# +# config logdir: + + +################################################### +# Step #3: Configure the base detection engine. For more information, see README.decode +################################################### + +# Configure PCRE match limitations +config pcre_match_limit: 3500 +config pcre_match_limit_recursion: 1500 + +# Configure the detection engine See the Snort Manual, Configuring Snort - Includes - Config +config detection: search-method ac-split search-optimize max-pattern-len 20 + +# Configure the event queue. For more information, see README.event_queue +config event_queue: max_queue 8 log 3 order_events content_length + +################################################### +# Per packet and rule latency enforcement +# For more information see README.ppm +################################################### + +# Per Packet latency configuration +#config ppm: max-pkt-time 250, \ +# fastpath-expensive-packets, \ +# pkt-log + +# Per Rule latency configuration +#config ppm: max-rule-time 200, \ +# threshold 3, \ +# suspend-expensive-rules, \ +# suspend-timeout 20, \ +# rule-log alert + +################################################### +# Configure Perf Profiling for debugging +# For more information see README.PerfProfiling +################################################### + +#config profile_rules: print all, sort avg_ticks +#config profile_preprocs: print all, sort avg_ticks + +################################################### +# Step #4: Configure dynamic loaded libraries. +# For more information, see Snort Manual, Configuring Snort - Dynamic Modules +################################################### + +# path to dynamic preprocessor libraries +dynamicpreprocessor directory /usr/lib/snort_dynamicpreprocessor/ + +# path to base preprocessor engine +dynamicengine /usr/lib/snort_dynamicengine/libsf_engine.so + +# path to dynamic rules libraries +# dynamicdetection directory /usr/lib/snort_dynamicrules + +################################################### +# Step #5: Configure preprocessors +# For more information, see the Snort Manual, Configuring Snort - Preprocessors +################################################### + +# Inline packet normalization. For more information, see README.normalize +# Does nothing in IDS mode +preprocessor normalize_ip4 +preprocessor normalize_tcp: ips ecn stream +preprocessor normalize_icmp4 +preprocessor normalize_ip6 +preprocessor normalize_icmp6 + +# Target-based IP defragmentation.
For more information, see README.frag3 +preprocessor frag3_global: max_frags 65536 +preprocessor frag3_engine: policy windows detect_anomalies overlap_limit 10 min_fragment_length 100 timeout 180 + +# Target-Based stateful inspection/stream reassembly. For more information, see README.stream5 +preprocessor stream5_global: track_tcp yes, \ + track_udp yes, \ + track_icmp no, \ + max_tcp 262144, \ + max_udp 131072, \ + max_active_responses 2, \ + min_response_seconds 5 +preprocessor stream5_tcp: policy windows, detect_anomalies, require_3whs 180, \ + overlap_limit 10, small_segments 3 bytes 150, timeout 180, \ + ports client 21 22 23 25 42 53 79 109 110 111 113 119 135 136 137 139 143 \ + 161 445 513 514 587 593 691 1433 1521 2100 3306 6070 6665 6666 6667 6668 6669 \ + 7000 8181 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779, \ + ports both 80 81 311 443 465 563 591 593 636 901 989 992 993 994 995 1220 1414 1830 2301 2381 2809 3128 3702 5250 7907 7001 7802 7777 7779 \ + 7801 7900 7901 7902 7903 7904 7905 7906 7908 7909 7910 7911 7912 7913 7914 7915 7916 \ + 7917 7918 7919 7920 8000 8008 8028 8080 8088 8118 8123 8180 8243 8280 8888 9090 9091 9443 9999 11371 \ + 18000 18010 18020 18030 18040 18050 18060 18070 18080 18090 18100 +preprocessor stream5_udp: timeout 180 + +# performance statistics. For more information, see the Snort Manual, Configuring Snort - Preprocessors - Performance Monitor +# preprocessor perfmonitor: time 300 file /var/snort/snort.stats pktcnt 10000 + +# HTTP normalization and anomaly detection. For more information, see README.http_inspect +preprocessor http_inspect: global iis_unicode_map unicode.map 1252 compress_depth 65535 decompress_depth 65535 +preprocessor http_inspect_server: server default \ + chunk_length 500000 \ + server_flow_depth 0 \ + client_flow_depth 0 \ + post_depth 65495 \ + oversize_dir_length 500 \ + max_header_length 750 \ + max_headers 100 \ + ports { 80 8000 18000 18010 18020 18030 18040 18050 18060 18070 18080 18090 18100 } \ + non_rfc_char { 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 } \ + enable_cookie \ + extended_response_inspection \ + inspect_gzip \ + normalize_utf \ + unlimited_decompress \ + apache_whitespace no \ + ascii no \ + bare_byte no \ + directory no \ + double_decode no \ + iis_backslash no \ + iis_delimiter no \ + iis_unicode no \ + multi_slash no \ + utf_8 no \ + u_encode yes \ + webroot no + +# ONC-RPC normalization and anomaly detection. For more information, see the Snort Manual, Configuring Snort - Preprocessors - RPC Decode +preprocessor rpc_decode: 111 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779 no_alert_multiple_requests no_alert_large_fragments no_alert_incomplete + +# Back Orifice detection. +preprocessor bo + +# FTP / Telnet normalization and anomaly detection.
For more information, see README.ftptelnet +preprocessor ftp_telnet: global inspection_type stateful encrypted_traffic no +preprocessor ftp_telnet_protocol: telnet \ + ayt_attack_thresh 20 \ + normalize ports { 23 } \ + detect_anomalies +preprocessor ftp_telnet_protocol: ftp server default \ + def_max_param_len 100 \ + ports { 21 2100 3535 } \ + telnet_cmds yes \ + ignore_telnet_erase_cmds yes \ + ftp_cmds { ABOR ACCT ADAT ALLO APPE AUTH CCC CDUP } \ + ftp_cmds { CEL CLNT CMD CONF CWD DELE ENC EPRT } \ + ftp_cmds { EPSV ESTA ESTP FEAT HELP LANG LIST LPRT } \ + ftp_cmds { LPSV MACB MAIL MDTM MIC MKD MLSD MLST } \ + ftp_cmds { MODE NLST NOOP OPTS PASS PASV PBSZ PORT } \ + ftp_cmds { PROT PWD QUIT REIN REST RETR RMD RNFR } \ + ftp_cmds { RNTO SDUP SITE SIZE SMNT STAT STOR STOU } \ + ftp_cmds { STRU SYST TEST TYPE USER XCUP XCRC XCWD } \ + ftp_cmds { XMAS XMD5 XMKD XPWD XRCP XRMD XRSQ XSEM } \ + ftp_cmds { XSEN XSHA1 XSHA256 } \ + alt_max_param_len 0 { ABOR CCC CDUP ESTA FEAT LPSV NOOP PASV PWD QUIT REIN STOU SYST XCUP XPWD } \ + alt_max_param_len 200 { ALLO APPE CMD HELP NLST RETR RNFR STOR STOU XMKD } \ + alt_max_param_len 256 { CWD RNTO } \ + alt_max_param_len 400 { PORT } \ + alt_max_param_len 512 { SIZE } \ + chk_str_fmt { ACCT ADAT ALLO APPE AUTH CEL CLNT CMD } \ + chk_str_fmt { CONF CWD DELE ENC EPRT EPSV ESTP HELP } \ + chk_str_fmt { LANG LIST LPRT MACB MAIL MDTM MIC MKD } \ + chk_str_fmt { MLSD MLST MODE NLST OPTS PASS PBSZ PORT } \ + chk_str_fmt { PROT REST RETR RMD RNFR RNTO SDUP SITE } \ + chk_str_fmt { SIZE SMNT STAT STOR STRU TEST TYPE USER } \ + chk_str_fmt { XCRC XCWD XMAS XMD5 XMKD XRCP XRMD XRSQ } \ + chk_str_fmt { XSEM XSEN XSHA1 XSHA256 } \ + cmd_validity ALLO < int [ char R int ] > \ + cmd_validity EPSV < [ { char 12 | char A char L char L } ] > \ + cmd_validity MACB < string > \ + cmd_validity MDTM < [ date nnnnnnnnnnnnnn[.n[n[n]]] ] string > \ + cmd_validity MODE < char ASBCZ > \ + cmd_validity PORT < host_port > \ + cmd_validity PROT < char CSEP > \ + cmd_validity STRU < char FRPO [ string ] > \ + cmd_validity TYPE < { char AE [ char NTC ] | char I | char L [ number ] } > +preprocessor ftp_telnet_protocol: ftp client default \ + max_resp_len 256 \ + bounce yes \ + ignore_telnet_erase_cmds yes \ + telnet_cmds yes + + +# SMTP normalization and anomaly detection. 
For more information, see README.SMTP +preprocessor smtp: ports { 25 465 587 691 } \ + inspection_type stateful \ + b64_decode_depth 0 \ + qp_decode_depth 0 \ + bitenc_decode_depth 0 \ + uu_decode_depth 0 \ + log_mailfrom \ + log_rcptto \ + log_filename \ + log_email_hdrs \ + normalize cmds \ + normalize_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \ + normalize_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \ + normalize_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \ + normalize_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \ + max_command_line_len 512 \ + max_header_line_len 1000 \ + max_response_line_len 512 \ + alt_max_command_line_len 260 { MAIL } \ + alt_max_command_line_len 300 { RCPT } \ + alt_max_command_line_len 500 { HELP HELO ETRN EHLO } \ + alt_max_command_line_len 255 { EXPN VRFY ATRN SIZE BDAT DEBUG EMAL ESAM ESND ESOM EVFY IDENT NOOP RSET } \ + alt_max_command_line_len 246 { SEND SAML SOML AUTH TURN ETRN DATA RSET QUIT ONEX QUEU STARTTLS TICK TIME TURNME VERB X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \ + valid_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \ + valid_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \ + valid_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \ + valid_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \ + xlink2state { enabled } + +# Portscan detection. For more information, see README.sfportscan +# preprocessor sfportscan: proto { all } memcap { 10000000 } sense_level { low } + +# ARP spoof detection. For more information, see the Snort Manual - Configuring Snort - Preprocessors - ARP Spoof Preprocessor +# preprocessor arpspoof +# preprocessor arpspoof_detect_host: 192.168.40.1 f0:0f:00:f0:0f:00 + +# SSH anomaly detection. For more information, see README.ssh +preprocessor ssh: server_ports { 22 } \ + autodetect \ + max_client_bytes 19600 \ + max_encrypted_packets 20 \ + max_server_version_len 100 \ + enable_respoverflow enable_ssh1crc32 \ + enable_srvoverflow enable_protomismatch + +# SMB / DCE-RPC normalization and anomaly detection. For more information, see README.dcerpc2 +preprocessor dcerpc2: memcap 102400, events [co ] +preprocessor dcerpc2_server: default, policy WinXP, \ + detect [smb [139,445], tcp 135, udp 135, rpc-over-http-server 593], \ + autodetect [tcp 1025:, udp 1025:, rpc-over-http-server 1025:], \ + smb_max_chain 3, smb_invalid_shares ["C$", "D$", "ADMIN$"] + +# DNS anomaly detection. For more information, see README.dns +preprocessor dns: ports { 53 } enable_rdata_overflow + +# SSL anomaly detection and traffic bypass. For more information, see README.ssl +preprocessor ssl: ports { 443 465 563 636 989 992 993 994 995 7801 7802 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 }, trustservers, noinspect_encrypted + +# SDF sensitive data preprocessor. For more information see README.sensitive_data +preprocessor sensitive_data: alert_threshold 25 + +# SIP Session Initiation Protocol preprocessor. 
For more information see README.sip +preprocessor sip: max_sessions 10000, \ + ports { 5060 5061 5600 }, \ + methods { invite \ + cancel \ + ack \ + bye \ + register \ + options \ + refer \ + subscribe \ + update \ + join \ + info \ + message \ + notify \ + benotify \ + do \ + qauth \ + sprack \ + publish \ + service \ + unsubscribe \ + prack }, \ + max_uri_len 512, \ + max_call_id_len 80, \ + max_requestName_len 20, \ + max_from_len 256, \ + max_to_len 256, \ + max_via_len 1024, \ + max_contact_len 512, \ + max_content_len 1024 + +# IMAP preprocessor. For more information see README.imap +preprocessor imap: \ + ports { 143 } \ + b64_decode_depth 0 \ + qp_decode_depth 0 \ + bitenc_decode_depth 0 \ + uu_decode_depth 0 + +# POP preprocessor. For more information see README.pop +preprocessor pop: \ + ports { 110 } \ + b64_decode_depth 0 \ + qp_decode_depth 0 \ + bitenc_decode_depth 0 \ + uu_decode_depth 0 + +################################################### +# Step #6: Configure output plugins +# For more information, see Snort Manual, Configuring Snort - Output Modules +################################################### + +# unified2 +# Recommended for most installs +# output unified2: filename merged.log, limit 128, nostamp, mpls_event_types, vlan_event_types + +# Additional configuration for specific types of installs +# output alert_unified2: filename snort.alert, limit 128, nostamp +# output log_unified2: filename snort.log, limit 128, nostamp + +# syslog +output alert_syslog: LOG_AUTH LOG_ALERT + +# pcap +output log_tcpdump: tcpdump.log + +# database +# output database: alert, <db_type>, user=<username> password=<password> test dbname=<name> host=<hostname> +# output database: log, <db_type>, user=<username> password=<password> test dbname=<name> host=<hostname> +# +# On Debian Systems, the database configuration is kept in a separate file: +# /etc/snort/database.conf. +# This file can be empty, if you are not using any database information +# If you are using databases, please edit that file instead of this one, to +# ensure smoother upgrades to future versions of this package. +include database.conf +# + +# prelude +# output alert_prelude + +# metadata reference data.
do not modify these lines +include classification.config +include reference.config + + +################################################### +# Step #7: Customize your rule set +# For more information, see Snort Manual, Writing Snort Rules +# +# NOTE: All categories are enabled in this conf file +################################################### + +# site specific rules +include $RULE_PATH/local.rules + +include $RULE_PATH/attack-responses.rules +include $RULE_PATH/backdoor.rules +include $RULE_PATH/bad-traffic.rules +# include $RULE_PATH/blacklist.rules +# include $RULE_PATH/botnet-cnc.rules +include $RULE_PATH/chat.rules +# include $RULE_PATH/content-replace.rules +include $RULE_PATH/ddos.rules +include $RULE_PATH/dns.rules +include $RULE_PATH/dos.rules +include $RULE_PATH/community-dos.rules +include $RULE_PATH/exploit.rules +include $RULE_PATH/community-exploit.rules +include $RULE_PATH/finger.rules +include $RULE_PATH/ftp.rules +include $RULE_PATH/community-ftp.rules +include $RULE_PATH/icmp.rules +include $RULE_PATH/icmp-info.rules +include $RULE_PATH/imap.rules +include $RULE_PATH/community-imap.rules +include $RULE_PATH/info.rules +include $RULE_PATH/misc.rules +include $RULE_PATH/multimedia.rules +include $RULE_PATH/mysql.rules +include $RULE_PATH/netbios.rules +include $RULE_PATH/nntp.rules +include $RULE_PATH/community-nntp.rules +include $RULE_PATH/oracle.rules +include $RULE_PATH/community-oracle.rules +include $RULE_PATH/other-ids.rules +include $RULE_PATH/p2p.rules +# include $RULE_PATH/phishing-spam.rules +include $RULE_PATH/policy.rules +# include $RULE_PATH/community-policy.rules +# include $RULE_PATH/community-inappropriate.rules +# include $RULE_PATH/community-game.rules +# include $RULE_PATH/community-misc.rules +include $RULE_PATH/pop2.rules +include $RULE_PATH/pop3.rules +include $RULE_PATH/rpc.rules +include $RULE_PATH/rservices.rules +# include $RULE_PATH/scada.rules +include $RULE_PATH/scan.rules +# Note: this rule is extremely chatty, enable with care +include $RULE_PATH/shellcode.rules +include $RULE_PATH/smtp.rules +include $RULE_PATH/community-smtp.rules +include $RULE_PATH/snmp.rules +# include $RULE_PATH/specific-threats.rules +# include $RULE_PATH/spyware-put.rules +include $RULE_PATH/sql.rules +include $RULE_PATH/telnet.rules +include $RULE_PATH/tftp.rules +include $RULE_PATH/virus.rules +include $RULE_PATH/community-virus.rules +include $RULE_PATH/community-bot.rules +# include $RULE_PATH/voip.rules +include $RULE_PATH/community-sip.rules +# Specific web server rules: +# include $RULE_PATH/web-activex.rules +include $RULE_PATH/web-attacks.rules +include $RULE_PATH/web-cgi.rules +include $RULE_PATH/web-client.rules +include $RULE_PATH/web-coldfusion.rules +include $RULE_PATH/web-frontpage.rules +include $RULE_PATH/web-iis.rules +include $RULE_PATH/web-misc.rules +include $RULE_PATH/web-php.rules +include $RULE_PATH/community-sql-injection.rules +include $RULE_PATH/community-web-client.rules +include $RULE_PATH/community-web-dos.rules +include $RULE_PATH/community-web-iis.rules +include $RULE_PATH/community-web-misc.rules +include $RULE_PATH/community-web-php.rules +include $RULE_PATH/x11.rules + 
+################################################### +# Step #8: Customize your preprocessor and decoder alerts +# For more information, see README.decoder_preproc_rules +################################################### + +# decoder and preprocessor event rules +# include $PREPROC_RULE_PATH/preprocessor.rules +# include $PREPROC_RULE_PATH/decoder.rules +# include $PREPROC_RULE_PATH/sensitive-data.rules + +################################################### +# Step #9: Customize your Shared Object Snort Rules +# For more information, see http://vrt-sourcefire.blogspot.com/2009/01/using-vrt-certified-shared-object-rules.html +################################################### + +# dynamic library rules +# include $SO_RULE_PATH/bad-traffic.rules +# include $SO_RULE_PATH/chat.rules +# include $SO_RULE_PATH/dos.rules +# include $SO_RULE_PATH/exploit.rules +# include $SO_RULE_PATH/icmp.rules +# include $SO_RULE_PATH/imap.rules +# include $SO_RULE_PATH/misc.rules +# include $SO_RULE_PATH/multimedia.rules +# include $SO_RULE_PATH/netbios.rules +# include $SO_RULE_PATH/nntp.rules +# include $SO_RULE_PATH/pop3.rules +# include $SO_RULE_PATH/p2p.rules +# include $SO_RULE_PATH/smtp.rules +# include $SO_RULE_PATH/snmp.rules +# include $SO_RULE_PATH/specific-threats.rules +# include $SO_RULE_PATH/sql.rules +# include $SO_RULE_PATH/web-activex.rules +# include $SO_RULE_PATH/web-client.rules +# include $SO_RULE_PATH/web-iis.rules +# include $SO_RULE_PATH/web-misc.rules + +# Event thresholding or suppression commands. See threshold.conf +include threshold.conf diff --git a/playbooks/roles/snort/templates/etc/snort/snort.debian.conf.j2 b/playbooks/roles/snort/templates/etc/snort/snort.debian.conf.j2 new file mode 100644 index 00000000000..49821aaf934 --- /dev/null +++ b/playbooks/roles/snort/templates/etc/snort/snort.debian.conf.j2 @@ -0,0 +1,22 @@ +# snort.debian.config (Debian Snort configuration file) +# +# This file was generated by the post-installation script of the snort +# package using values from the debconf database. +# +# It is used for options that are changed by Debian to leave +# the original configuration files untouched. +# +# This file is automatically updated on upgrades of the snort package +# *only* if it has not been modified since the last upgrade of that package. +# +# If you have edited this file but would like it to be automatically updated +# again, run the following command as root: +# dpkg-reconfigure snort + +DEBIAN_SNORT_STARTUP="boot" +DEBIAN_SNORT_HOME_NET="" +DEBIAN_SNORT_OPTIONS="" +DEBIAN_SNORT_INTERFACE="eth0" +DEBIAN_SNORT_SEND_STATS="true" +DEBIAN_SNORT_STATS_RCPT="root" +DEBIAN_SNORT_STATS_THRESHOLD="1" diff --git a/playbooks/roles/splunk-server/defaults/main.yml b/playbooks/roles/splunk-server/defaults/main.yml new file mode 100644 index 00000000000..2c8f46190c3 --- /dev/null +++ b/playbooks/roles/splunk-server/defaults/main.yml @@ -0,0 +1,145 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role splunk-server +# + +# +# vars are namespaced with the module name. +# +SPLUNK_INDEXES: [] + +SPLUNK_ALERTS: [] + # A list of dicts with the following keys: + # + # name: (string, required) + # The name of the alert + # + # description: (string, optional) + # A description of the alert. 
Appears in the Splunk UI.
+  #
+  # email: (list[string], optional)
+  #   List of email addresses to send to when alert is triggered
+  #
+  # message: (string, optional)
+  #   Body of the alert email. You can include information from the alert via the tokens documented here:
+  #   http://docs.splunk.com/Documentation/Splunk/6.4.1/Alert/EmailNotificationTokens
+  #
+  # search: (string, required)
+  #   Splunk query to use
+  #
+  # schedule: (string, default: "*/15 * * * *")
+  #   The cron-style schedule on which to run the alert
+  #
+  # counttype: ("number of events" | "number of hosts" | "number of sources" | "always", default: "number of events")
+  #
+  # comparison: ("greater than" | "less than" | "equal to" | "not equal to" | "drops by" | "rises by", default: "greater than")
+  #
+  # quantity: (number, default: 0)
+  #   Alert will trigger when "counttype comparison quantity" is true, e.g. "number of events > 10"
+  #
+  # time: (string, default: "-15m")
+  #   Events will be searched from this value until now. "rt" indicates "realtime".
+  #
+  # severity: ([1-6], default: SPLUNK_ALERT_DEFAULT_SEVERITY)
+  #   The severity of the alert. 1-debug, 2-info, 3-warn, 4-error, 5-severe, 6-fatal
+  #
+  # digest_delay: (string, optional)
+  #   If set, send email digests at most once every "digest_delay" (e.g. 15m) rather than an email per alert
+  #
+  # NB: None of the string values can contain newlines except "message"
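+  #
+  # For illustration only (all values below are hypothetical), an entry
+  # might look like:
+  #
+  # SPLUNK_ALERTS:
+  #   - name: "Too many 5xx responses"
+  #     description: "Fires when nginx returns an elevated rate of 5xx responses"
+  #     email: ["ops@example.com"]
+  #     search: "index=prod-edx sourcetype=nginx status>=500"
+  #     schedule: "*/15 * * * *"
+  #     counttype: "number of events"
+  #     comparison: "greater than"
+  #     quantity: 10
+  #     time: "-15m"
+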
+SPLUNK_FIELD_EXTRACTIONS: []
+  # A list of dicts of the following form; "source" and "sourcetype" are
+  # mutually exclusive, so define exactly one of them per entry.
+  # - sourcetype | source:
+  #   name:
+  #   regex:
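+  #
+  # For illustration only (hypothetical values), an entry might look like:
+  #
+  # SPLUNK_FIELD_EXTRACTIONS:
+  #   - sourcetype: "nginx"
+  #     name: "http_status"
+  #     regex: 'status=(?<status>\d+)'
+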
item.1 }}" + owner: splunk + group: splunk + mode: "0400" + when: SPLUNK_SSL_CERT is defined + with_together: + - [forwarder.pem, cacert.pem] + - ["{{ SPLUNK_SSL_CERT }}", "{{ SPLUNK_SSL_ROOT_CA }}"] + +- name: Create inputs configuration + template: + src: opt/splunk/etc/system/local/inputs.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/inputs.conf" + owner: splunk + group: splunk + mode: "0600" + +- name: Create web configuration + template: + src: opt/splunk/etc/system/local/web.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/web.conf" + owner: splunk + group: splunk + mode: "0600" + tags: + - "install" + - "install:configuration" + - "install:configuration:web" + +- name: Create props configuation + template: + src: opt/splunk/etc/system/local/props.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/props.conf" + owner: splunk + group: splunk + mode: "0600" + tags: + - "install" + - "install:configuration" + +- name: Create transforms configuation + template: + src: opt/splunk/etc/system/local/transforms.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/transforms.conf" + owner: splunk + group: splunk + mode: "0600" + tags: + - "install" + - "install:configuration" + +- name: Create bucket directories + file: + path: "{{ item }}" + state: directory + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + with_items: + - "{{ splunk_hot_dir }}" + - "{{ splunk_thawed_dir }}" + - "{{ splunk_cold_dir }}" + - "{{ splunk_frozen_dir }}" + tags: + - "install" + - "install:configuration" + +- name: Create configuration directory + file: + path: "{{ item }}" + state: directory + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + with_items: + - "{{ splunk_home }}/etc/apps/search/local" + tags: + - "install" + - "install:configuration" + +- name: configure splunk buckets + template: + src: "opt/splunk/etc/apps/search/local/indexes.conf.j2" + dest: "{{ splunk_home }}/etc/apps/search/local/indexes.conf" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0600 + tags: + - "install" + - "install:configuration" + +- name: configure splunk email + template: + src: opt/splunk/etc/system/local/alert_actions.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/alert_actions.conf" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0600 + tags: + - install + - install:configuration + +- name: configure splunk searches + template: + src: "opt/splunk/etc/apps/search/local/savedsearches.conf.j2" + dest: "{{ splunk_home }}/etc/apps/search/local/savedsearches.conf" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0600 + tags: + - "install" + - "install:configuration" + when: SPLUNK_ALERTS is defined + +- name: configure splunk field extractions + template: + src: opt/splunk/etc/apps/search/local/props.conf.j2 + dest: "{{ splunk_home }}/etc/apps/search/local/props.conf" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0700 + tags: + - install + - install:configuration + when: SPLUNK_FIELD_EXTRACTIONS is defined + +- name: Make dashboards directory + file: + state: directory + path: "{{ splunk_home }}/etc/apps/search/local/data/ui/views" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + +- name: configure splunk dashboards + template: + src: "{{ item }}" + dest: "{{ splunk_home }}/etc/apps/search/local/data/ui/views/{{ item.rstrip('.j2') | basename }}" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0700 + with_items: "{{ SPLUNK_DASHBOARDS }}" + tags: + - install + - install:configuration + +- name: 
create splunk coldToFrozen script + template: + src: "opt/splunk/bin/coldToFrozenS3.j2" + dest: "{{ splunk_home }}/bin/coldToFrozenS3" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0700 + tags: + - "install" + - "install:configuration" + +- name: create app download directory + file: + path: "{{ SPLUNK_APP_DOWNLOAD_PATH }}" + owner: "{{ splunk_user }}" + group: "{{ splunk_user }}" + mode: 0755 + state: directory + tags: + - install + - install:configuration + +- name: discover existing app tars + stat: path={{ SPLUNK_APP_DOWNLOAD_PATH }}/{{ item.file_name }} + register: app_present + tags: + - install + - install:configuration + with_items: + - "{{ SPLUNK_APP_TARS }}" + +- name: download missing tars + shell: "aws s3 cp {{ SPLUNK_APP_S3_PATH }}/{{ item.1.file_name }} {{ SPLUNK_APP_DOWNLOAD_PATH }}/{{ item.1.file_name }}" + tags: + - install + - install:configuration + when: not item.0.stat.exists + with_together: + - "{{ app_present.results }}" + - "{{ SPLUNK_APP_TARS }}" + +- name: verify app checksums + shell: "echo \"{{ item.md5sum }} {{ SPLUNK_APP_DOWNLOAD_PATH }}/{{ item.file_name }}\" | md5sum -c --status" + tags: + - install + - install:configuration + with_items: + - "{{ SPLUNK_APP_TARS }}" + +- name: install apps + unarchive: + copy: no + src: "{{ SPLUNK_APP_DOWNLOAD_PATH }}/{{ item.file_name }}" + dest: "{{ splunk_home }}/etc/apps" + tags: + - install + - install:configuration + with_items: + - "{{ SPLUNK_APP_TARS }}" + +- name: restart splunk + service: + name: splunk + state: restarted + tags: + - "install" + - "install:configuration" + - "restart" diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/bin/coldToFrozenS3.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/bin/coldToFrozenS3.j2 new file mode 100644 index 00000000000..7479450cb77 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/bin/coldToFrozenS3.j2 @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +s3_backup_bucket="{{ SPLUNK_FROZEN_DB_S3_BUCKET }}" + +if [ $# -lt 1 ]; then + echo "Usage: coldToFrozenS3 " + exit 1 +fi + +colddb=$1 +if [[ $colddb == */ ]]; then + colddb=${colddb::-1} +fi + +if [[ ! -d $colddb ]]; then + echo "Given splunk bucket is not a valid directory: $colddb" + exit 1 +elif [[ ! 
-d $colddb/rawdata ]]; then
+  echo "No rawdata directory, this is not an index database: $colddb"
+  exit 1
+fi
+
+# We have splunk bucket path like this /SPLUNK/DB/PATH/$index/colddb/$splunkbucket
+# and want to move it to the s3 bucket in the format s3://$s3_backup_bucket/$index/frozendb/$splunkbucket
+
+read -a segments <&1
+fi
diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/indexes.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/indexes.conf.j2
new file mode 100644
index 00000000000..7bb4b97513e
--- /dev/null
+++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/indexes.conf.j2
@@ -0,0 +1,38 @@
+{% for index in SPLUNK_INDEXES %}
+[{{ index.name }}]
+{% if index.coldPath is defined %}
+coldPath = {{ index.coldPath }}
+{% else %}
+coldPath = {{ splunk_cold_dir }}/{{ index.name }}/colddb
+{% endif %}
+{% if index.homePath is defined %}
+homePath = {{ index.homePath }}
+{% else %}
+homePath = {{ splunk_hot_dir }}/{{ index.name }}/db
+{% endif %}
+{% if index.maxTotalDataSizeMB is defined %}
+maxTotalDataSizeMB = {{ index.maxTotalDataSizeMB }}
+{% endif %}
+{% if index.thawedPath is defined %}
+thawedPath = {{ index.thawedPath }}
+{% else %}
+thawedPath = {{ splunk_thawed_dir }}/{{ index.name }}/thaweddb
+{% endif %}
+{% if index.coldToFrozenScript is defined %}
+coldToFrozenScript = {{ splunk_home }}/bin/coldToFrozenS3
+{% endif %}
+{% if index.disabled is defined %}
+disabled = {{ index.disabled }}
+{% endif %}
+{% if index.home is defined %}
+home = {{ index.home }}
+{% endif %}
+{% if index.enableDataIntegrityControl is defined %}
+enableDataIntegrityControl = {{ index.enableDataIntegrityControl }}
+{% endif %}
+{% if index.enableTsidxReduction is defined %}
+enableTsidxReduction = {{ index.enableTsidxReduction }}
+{% endif %}
+
+{% endfor %}
+
diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/props.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/props.conf.j2
new file mode 100644
index 00000000000..85d18bc11b3
--- /dev/null
+++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/props.conf.j2
@@ -0,0 +1,46 @@
+{% for extraction in SPLUNK_FIELD_EXTRACTIONS %}
+{% if 'source' in extraction %}
+[source::{{ extraction.source }}]
+{% elif 'sourcetype' in extraction %}
+[{{ extraction.sourcetype }}]
+{% endif %}
+{% if extraction.break_before is defined %}
+BREAK_ONLY_BEFORE = {{ extraction.break_before }}
+{% endif %}
+{% if extraction.max_events is defined %}
+MAX_EVENTS = {{ extraction.max_events }}
+{% endif %}
+{% if extraction.datetime_config is defined %}
+DATETIME_CONFIG = {{ extraction.datetime_config }}
+{% endif %}
+{% if extraction.indexed_extractions is defined %}
+INDEXED_EXTRACTIONS = {{ extraction.indexed_extractions }}
+{% endif %}
+{% if extraction.no_binary_check is defined %}
+NO_BINARY_CHECK = {{ extraction.no_binary_check }}
+{% endif %}
+{% if extraction.timestamp_fields is defined %}
+TIMESTAMP_FIELDS = {{ extraction.timestamp_fields }}
+{% endif %}
+{% if extraction.category is defined %}
+category = {{ extraction.category }}
+{% endif %}
+{% if extraction.description is defined %}
+description = {{ extraction.description }}
+{% endif %}
+{% if extraction.disabled is defined %}
+disabled = {{ extraction.disabled }}
+{% endif %}
+{% if extraction.pulldown_type is defined %}
+pulldown_type = {{ extraction.pulldown_type }}
+{% endif %}
+{% if extraction.name is defined %}
+EXTRACT-{{
extraction.name }} = {{ extraction.regex }} +{% endif %} +{% if 'sourcetype' in extraction and extraction.sourcetype == "build_log" %} +EXTRACT-run-results = Setting status of .* and message: 'Build finished. (?P\d+) tests run, (?P\d+) skipped, (?P\d+) failed. +EXTRACT-error_msg = \n?ERROR: (?P[^\n]*) +EXTRACT-buildResult = Finished: (?P[A-Z]+)$ +{% endif %} + +{% endfor %} diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/savedsearches.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/savedsearches.conf.j2 new file mode 100644 index 00000000000..847a9f318cd --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/apps/search/local/savedsearches.conf.j2 @@ -0,0 +1,250 @@ +{# The format of this file is partially documented here: + http://docs.splunk.com/Documentation/Splunk/6.1/admin/savedsearchesconf +#} +{% for search in SPLUNK_ALERTS %} +[{{ search.name }}] +{% if search.email is defined %} +action.email = {{ search.email }} +{% endif %} +{% if search.email_format is defined %} +action.email.format = {{ search.email_format }} +{% endif %} +{% if search.email_alert is defined %} +{# Include a backslash before newlines to match splunk's wonky INI format #} +action.email.message.alert = {{ search.email_alert.split('\n') | join('\\\n')}} +{% endif %} +{% if search.email_inline is defined %} +action.email.inline = {{ search.email_inline }} +{% endif %} +{% if search.email_reportserverenabled is defined %} +action.email.reportServerEnabled = {{ search.email_reportserverenabled }} +{% endif %} +{% if search.email_priority is defined %} +action.email.priority = {{ search.email_priority }} +{% endif %} +{% if search.email_csv is defined %} +action.email.sendcsv = {{ search.email_csv }} +{% endif %} +{% if search.email_to is defined %} +action.email.to = {{ search.email_to }} +{% endif %} +{% if search.email_sendresults is defined %} +action.email.sendresults = {{ search.email_sendresults }} +{% endif %} +{% if search.email_report is defined %} +action.email.message.report = {{ search.email_report }} +{% endif %} +{% if search.email_sendpdf is defined %} +action.email.sendpdf = {{ search.email_sendpdf }} +{% endif %} +{% if search.email_include_search is defined %} +action.email.include.search = {{ search.email_include_search }} +{% endif %} +{% if search.email_bcc is defined %} +action.email.bcc = {{ search.email_bcc }} +{% endif %} +{% if search.email_cc is defined %} +action.email.cc = {{ search.email_cc }} +{% endif %} +{% if search.email_content_type is defined %} +action.email.content_type = {{ search.email_content_type }} +{% endif %} +{% if search.email_result_link is defined %} +action.email.include.results_link = {{ search.email_result_link }} +{% endif %} +{% if search.email_view_link is defined %} +action.email.include.view_link = {{ search.email_view_link }} +{% endif %} +{% if search.email_sub is defined %} +action.email.subject = {{ search.email_sub }} +{% endif %} +{% if search.email_sub_alert is defined %} +action.email.subject.alert = {{ search.email_sub_alert }} +{% endif %} +{% if search.email_sub_report is defined %} +action.email.subject.report = {{ search.email_sub_report }} +{% endif %} +{% if search.email_NSSsub is defined %} +action.email.useNSSubject = {{ search.email_NSSsub }} +{% endif %} +{% if search.email_trigger_time is defined %} +action.email.include.trigger_time = {{ search.email_trigger_time }} +{% endif %} +{% if search.email_trigger is defined %} 
+action.email.include.trigger = {{ search.email_trigger }} +{% endif %} +{% if search.timespan is defined %} +auto_summarize.timespan = {{ search.timespan }} +{% endif %} +{% if search.display_general_type is defined %} +display.general.type = {{ search.display_general_type }} +{% endif %} +{% if search.display_search_tab is defined %} +display.page.search.tab = {{ search.display_search_tab }} +{% endif %} +{% if search.search_pattern_sensitivity is defined %} +display.page.search.patterns.sensitivity = {{ search.search_pattern_sensitivity }} +{% endif %} +{% if search.enablesched is defined %} +enableSched = {{ search.enablesched }} +{% endif %} +{% if search.dispatch_app is defined %} +request.ui_dispatch_app = {{ search.dispatch_app }} +{% endif %} +{% if search.events_fields is defined %} +display.events.fields = ["{{ search.events_fields | list | join ("\",\"") }}"] +{% endif %} +{% if search.visual_chart_height is defined %} +display.visualizations.chartHeight = {{ search.visual_chart_height }} +{% endif %} +{% if search.visual_chart is defined %} +display.visualizations.charting.chart = {{ search.visual_chart }} +{% endif %} +{% if search.visual_show is defined %} +display.visualizations.show = {{ search.visual_show }} +{% endif %} +{% if search.visual_chart_y_axis is defined %} +display.visualizations.charting.axisY.scale = {{ search.visual_chart_y_axis }} +{% endif %} +{% if search.visual_beforelabel is defined %} +display.visualizations.singlevalue.beforeLabel = {{ search.visual_beforelabel }} +{% endif %} +{% if search.visual_underlabel is defined %} +display.visualizations.singlevalue.underLabel = {{ search.visual_underlabel }} +{% endif %} +{% if search.display_timerange is defined %} +display.general.timeRangePicker.show = {{ search.display_timerange }} +{% endif %} +{% if search.visual_drilldown is defined %} +display.visualizations.charting.drilldown = {{ search.visual_drilldown }} +{% endif %} +{% if search.display_chart_mode is defined %} +display.visualizations.charting.chart.stackMode = {{ search.display_chart_mode }} +{% endif %} +{% if search.visual_Y_axis_title is defined %} +display.visualizations.charting.axisTitleY.text = {{ search.visual_Y_axis_title }} +{% endif %} +{% if search.visual_X_axis_title is defined %} +display.visualizations.charting.axisTitleX.text = {{ search.visual_X_axis_title }} +{% endif %} +{% if search.visual_chart_legend_place is defined %} +display.visualizations.charting.legend.placement = {{ search.visual_chart_legend_place }} +{% endif %} +{% if search.visual_chart_nullvalue is defined %} +display.visualizations.charting.chart.nullValueMode = {{ search.visual_chart_nullvalue }} +{% endif %} +{% if search.visual_chart_layout is defined %} +display.visualizations.charting.layout.splitSeries = {{ search.visual_chart_layout }} +display.visualizations.charting.layout.splitSeries.allowIndependentYRanges = {{ search.visual_chart_layout_splitseries_y_range }} +{% endif %} +{% if search.visual_chart_legend_style is defined %} +display.visualizations.charting.legend.labelStyle.overflowMode = {{ search.visual_chart_legend_style }} +{% endif %} +{% if search.visual_chart_style is defined %} +display.visualizations.charting.chart.style = {{ search.visual_chart_style }} +{% endif %} +{% if search.visual_afterlabel is defined %} +display.visualizations.singlevalue.afterLabel = {{ search.visual_afterlabel }} +{% endif %} +{% if search.display_event_list_wrap is defined %} +display.events.list.wrap = {{ search.display_event_list_wrap }} +{% endif %} +{% 
if search.dispatch_sample_ratio is defined %} +dispatch.sample_ratio = {{ search.dispatch_sample_ratio }} +{% endif %} +{% if search.display_events_maxlines is defined %} +display.events.maxLines = {{ search.display_events_maxlines }} +{% endif %} +{% if search.display_events_rownum is defined %} +display.events.rowNumbers = {{ search.display_events_rownum }} +{% endif %} +{% if search.display_events_drilldown is defined %} +display.events.table.drilldown = {{ search.display_events_drilldown }} +{% endif %} +{% if search.display_events_table_wrap is defined %} +display.events.table.wrap = {{ search.display_events_table_wrap }} +{% endif %} +{% if search.events_type is defined %} +display.events.type = {{ search.events_type }} +{% endif %} +{% if search.stat_wrap is defined %} +display.statistics.wrap = {{ search.stat_wrap }} +{% endif %} +{% if search.schedule_window is defined %} +schedule_window = {{ search.schedule_window }} +{% endif %} +{% if search.search_mode is defined %} +display.page.search.mode = {{ search.search_mode }} +{% endif %} +{% if search.auto_summarize is defined %} +auto_summarize = {{ search.auto_summarize }} +{% endif %} +{% if search.suppress is defined %} +alert.suppress = {{ search.suppress }} +{% endif %} +{% if search.description is defined %} +description = {{ search.description }} +{% endif %} +{% if search.suppress_period is defined %} +alert.suppress.period = {{ search.suppress_period }} +{% endif %} +{% if search.severity is defined %} +alert.severity = {{ search.severity }} +{% endif %} +{% if search.track is defined %} +alert.track = {{ search.track }} +{% endif %} +{% if search.dispatch_as is defined %} +dispatchAs = {{ search.dispatch_as }} +{% endif %} +{% if search.opsgenie is defined %} +action.opsgenie = {{ search.opsgenie }}
 +action.opsgenie.param.api_url = {{ search.opsgenie_api_url }} +{% endif %} +{% if search.digest_mode is defined %} +alert.digest_mode = {{ search.digest_mode }} +{% endif %} +{% if search.stats is defined %} +display.statistics.show = {{ search.stats }} +{% endif %} +{% if search.alert_condition is defined %} +alert_condition = {{ search.alert_condition }} +{% endif %} +{% if search.counttype is defined %} +counttype = {{ search.counttype }} +{% endif %} +{% if search.displayview is defined %} +displayview = {{ search.displayview }} +{% endif %} +{% if search.dispatchview is defined %} +request.ui_dispatch_view = {{ search.dispatchview }} +{% endif %} +{% if search.schedule is defined %} +cron_schedule = {{ search.schedule }} +{% endif %} +{% if search.disabled is defined %} +disabled = {{ search.disabled }} +{% endif %} +{% if search.quantity is defined %} +quantity = {{ search.quantity }} +{% endif %} +{% if search.time is defined %} +dispatch.earliest_time = {{ search.time }} +{% endif %} +{% if search.relation is defined %} +relation = {{ search.relation }} +{% endif %} +{% if search.dispatch_earliest_time is defined %} +auto_summarize.dispatch.earliest_time = {{ search.dispatch_earliest_time }} +{% endif %} +{% if search.dispatch_latest_time is defined %} +dispatch.latest_time = {{ search.dispatch_latest_time }} +{% endif %} +{% if search.search is defined %} +search = {{ search.search }} +{% endif %} +{% if search.vsid is defined %} +vsid = {{ search.vsid }} +{% endif %} + +{% endfor %} diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/alert_actions.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/alert_actions.conf.j2 new file mode 100644 index 00000000000..065fc00024a --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/alert_actions.conf.j2 @@ -0,0 +1,12 @@ +[email] +auth_password = {{ SPLUNK_SMTP_PASSWORD }} +auth_username = {{ SPLUNK_SMTP_USERNAME }} +footer.text = {{ SPLUNK_EMAIL_FOOTER }} +hostname = {{ SPLUNK_SSL_HOSTNAME }} +mailserver = {{ SPLUNK_SMTP_SERVER }} +reportServerURL = +use_tls = 1 +pdf.header_left = none +pdf.header_right = none +use_ssl = 0 +from = {{ SPLUNK_FROM_ADDRESS }} diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/inputs.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/inputs.conf.j2 new file mode 100644 index 00000000000..56049a71494 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/inputs.conf.j2 @@ -0,0 +1,13 @@ +[default] +host = {{ SPLUNK_HOSTNAME }} + +{% if SPLUNK_SSL_CERT %} +[splunktcp-ssl:{{ splunk_ssl_port }}] +compressed = true + +[SSL] +password = {{ SPLUNK_SSL_PASSWORD }} +requireClientCert = false +rootCA = $SPLUNK_HOME/{{ splunk_cert_path }}/cacert.pem +serverCert = $SPLUNK_HOME/{{ splunk_cert_path }}/forwarder.pem +{% endif %} diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/props.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/props.conf.j2 new file mode 100644 index 00000000000..ac63d159202 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/props.conf.j2 @@ -0,0 +1,49 @@ +# Version 6.2.1 +# DO NOT EDIT THIS FILE! +# Changes to default files will be lost on update and are difficult to +# manage and support. 
+# +# Please make any changes to system defaults by overriding them in +# apps or $SPLUNK_HOME/etc/system/local +# (See "Configuration file precedence" in the web documentation). +# +# To override a specific setting, copy the name of the stanza and +# setting to the file where you wish to override it. +# +# This file contains possible attribute/value pairs for configuring +# Splunk's processing properties. +# + +[junit] +KV_MODE = xml +TRUNCATE = 100000 +BREAK_ONLY_BEFORE = <\?xml version= +MAX_EVENTS = 10000 +SHOULD_LINEMERGE = False +LINE_BREAKER = (?s)(\s+).+?[^<][^\/][^c][^a][^s][^e][^>] + +[build_result] +KV_MODE = xml +BREAK_ONLY_BEFORE = <\?xml version= +SHOULD_LINEMERGE = True +MAX_EVENTS = 10000 + +[sitespeed_result] +SHOULD_LINEMERGE = true +INDEXED_EXTRACTIONS = json +NO_BINARY_CHECK = true +KV_MODE = none +TRUNCATE = 0 + +[jenkins_build] +BREAK_ONLY_BEFORE = +DATETIME_CONFIG = +NO_BINARY_CHECK = true +category = Application +description = Jenkins build logs +disabled = false +maxDist = 75 +pulldown_type = 1 + +[syslog] +TRANSFORMS-anonymize = ansible-output diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/transforms.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/transforms.conf.j2 new file mode 100644 index 00000000000..76d0a4847f0 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/transforms.conf.j2 @@ -0,0 +1,6 @@ +[ansible-output] +REGEX = ansible-(.*) +# We want to anonymize Ansible command output in log events as it is +# logging passwords in Splunk index (See https://openedx.atlassian.net/browse/OPS-3241). +FORMAT = ansible-output: Anonymized for security.######################### +DEST_KEY = _raw diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/web.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/web.conf.j2 new file mode 100644 index 00000000000..cb3cb05d983 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/web.conf.j2 @@ -0,0 +1,15 @@ +[settings] +enableSplunkWebSSL = 0 +{% if SPLUNK_PROXY_ENABLED %} +tools.proxy.on = true +tools.proxy.base = {{ SPLUNK_PROXY_URL }} + +# If set to "0", prevents the application server from +# being run from splunkd. Instead, Splunk Web starts as +# a separate python-based service which directly listens to the +# httpport. This is how Splunk 6.1.X and earlier behaved. 
+
+# If you don't set this, putting splunk behind a proxy that terminates TLS
+# will not work: it will complain about strict SSO Mode being set
+# even if you set SSOMode = permissive
+appServerPorts = 0
+{% endif %}
diff --git a/playbooks/roles/splunkforwarder/defaults/main.yml b/playbooks/roles/splunkforwarder/defaults/main.yml
index 988918255fc..7cba8d8b96d 100644
--- a/playbooks/roles/splunkforwarder/defaults/main.yml
+++ b/playbooks/roles/splunkforwarder/defaults/main.yml
@@ -2,39 +2,87 @@
 #
 # edX Configuration
 #
-# github: https://github.com/edx/configuration
-# wiki: https://github.com/edx/configuration/wiki
-# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
-# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
 #
 ##
 # Vars for role splunk
-#
+#
 #
 # vars are namespace with the module name.
 #
 splunk_role_name: 'splunk'
 
-SPLUNKFORWARDER_SERVER: 'localhost:9997'
-
-SPLUNKFORWARDER_PACKAGE_LOCATION: !!null
+SPLUNKFORWARDER_PACKAGE_URL: !!null
 SPLUNKFORWARDER_DEB: !!null
+SPLUNKFORWARDER_RPM: !!null
 SPLUNKFORWARDER_PASSWORD: !!null
+# A list of dicts with the following keys:
+# target_group: the name of the group
+# server: the hostname/IP address of the splunk server
+# default: whether this group is the default logging group
+# The following keys are for SSL configuration with the server in question.
+# Either all must be defined, or none.
+# ssl_cert: the text of the SSL cert to use
+# ssl_cert_password: the password of the SSL cert
+# ssl_root_ca: the root CA cert that signed the SSL cert
+# ssl_common_name: the common name (CN) on the SSL cert
+SPLUNKFORWARDER_SERVERS:
+  - target_group: "default_output_server"
+    server: "localhost:9997"
+    default: true
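+#
+# For illustration only (certificate material and hostnames below are
+# hypothetical), an SSL-enabled entry might look like:
+# SPLUNKFORWARDER_SERVERS:
+#   - target_group: "splunk_ssl"
+#     server: "splunk.example.com:9995"
+#     default: true
+#     ssl_cert: "{{ MY_FORWARDER_CERT }}"
+#     ssl_cert_password: "{{ MY_FORWARDER_CERT_PASSWORD }}"
+#     ssl_root_ca: "{{ MY_ROOT_CA }}"
+#     ssl_common_name: "splunk.example.com"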
+
+# For more details about setting up splunk with SSL, see
+# https://openedx.atlassian.net/wiki/display/EdxOps/viewpage.action?pageId=40174184
+
+############################ DANGER WILL ROBINSON #############################
+# Splunk server only supports a single SSL cert for all connections!          #
+# If you ever need to rotate the cert, you will have to either take downtime  #
+# or let new logs buffer on the forwarders until you update them.             #
+# When you do update the forwarders, you can't simply roll out new AMIs since #
+# there will be un-forwarded logs. Instead, you must run ansible against your #
+# entire fleet.                                                               #
+###############################################################################
+
+SPLUNKFORWARDER_HOST_VALUE: !!null
+
 SPLUNKFORWARDER_LOG_ITEMS:
-  - directory: '{{ COMMON_LOG_DIR }}'
+  - source: '{{ COMMON_LOG_DIR }}/lms'
     recursive: true
-    index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
     sourcetype: 'edx'
-  - directory: '/var/log'
+  - source: '{{ COMMON_LOG_DIR }}/cms'
     recursive: true
-    index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
+    sourcetype: 'edx'
+  - source: '{{ COMMON_LOG_DIR }}/mongo'
+    recursive: true
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
+    sourcetype: 'mongo'
+  - source: '{{ COMMON_LOG_DIR }}'
+    recursive: true
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
+    sourcetype: 'syslog'
+  - source: '/var/log'
+    recursive: true
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
     sourcetype: 'syslog'
-  - directory: '{{ COMMON_LOG_DIR }}/nginx'
+  - source: '{{ COMMON_LOG_DIR }}/nginx'
     recursive: true
-    index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
     sourcetype: 'nginx'
+  - source: '{{ COMMON_LOG_DIR }}/rabbitmq'
+    recursive: true
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
+    sourcetype: 'rabbitmq'
+  - source: '/var/log/neo4j'
+    recursive: true
+    index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
+    sourcetype: 'neo4j'
 
 #
 # OS packages
@@ -45,3 +93,4 @@ splunk_debian_pkgs:
 splunk_redhat_pkgs: []
 
 splunkforwarder_output_dir: '/opt/splunkforwarder/'
+splunkforwarder_ssl_cert_path: 'etc/auth/edxcerts'
diff --git a/playbooks/roles/splunkforwarder/handlers/main.yml b/playbooks/roles/splunkforwarder/handlers/main.yml
index aedccb14147..a342f1fc209 100644
--- a/playbooks/roles/splunkforwarder/handlers/main.yml
+++ b/playbooks/roles/splunkforwarder/handlers/main.yml
@@ -2,19 +2,21 @@
 #
 # edX Configuration
 #
-# github: https://github.com/edx/configuration
-# wiki: https://github.com/edx/configuration/wiki
-# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
-# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
 #
 #
 #
 # Handlers for role splunk
-#
+#
 # Overview:
-#
+#
 #
 # Restart Splunk
 - name: restart splunkforwarder
-  service: name=splunk state=restarted
+  service:
+    name: SplunkForwarder
+    state: restarted
diff --git a/playbooks/roles/splunkforwarder/tasks/main.yml b/playbooks/roles/splunkforwarder/tasks/main.yml
index 4c81459eed6..e55c4e0a54f 100644
--- a/playbooks/roles/splunkforwarder/tasks/main.yml
+++ b/playbooks/roles/splunkforwarder/tasks/main.yml
@@ -2,10 +2,10 @@
 #
 # edX Configuration
 #
-# github: https://github.com/edx/configuration
-# wiki: https://github.com/edx/configuration/wiki
-# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
-# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: 
https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -21,84 +21,127 @@ # # -# Install Splunk Forwarder -- name: install splunkforwarder specific system packages - apt: pkg={{','.join(splunk_debian_pkgs)}} state=present +# Install Splunk Forwarder for common_debian_variants +- name: Install splunkforwarder specific system packages + apt: + name: "{{ splunk_debian_pkgs }}" + state: present tags: - - splunk - - install - - update + - splunk + - install + - update + when: ansible_distribution in common_debian_variants -- name: download the splunk deb - get_url: > - dest="/tmp/{{SPLUNKFORWARDER_DEB}}" - url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}" +- name: Download the splunk deb + get_url: + dest: "/tmp/{{ SPLUNKFORWARDER_DEB }}" + url: "{{ SPLUNKFORWARDER_PACKAGE_URL }}" register: download_deb + until: download_deb is succeeded + retries: 5 + when: ansible_distribution in common_debian_variants -- name: install splunk forwarder - shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}} - when: download_deb.changed +- name: Install splunk forwarder deb + shell: "gdebi -nq /tmp/{{ SPLUNKFORWARDER_DEB }}" + when: ansible_distribution in common_debian_variants and download_deb.changed -# Create splunk user -- name: create splunk user - user: name=splunk createhome=no state=present append=yes groups=syslog - when: download_deb.changed +# Install Splunk Forwarder for common_redhat_variants +- name: Download the splunk rpm + get_url: + dest: "/tmp/{{ SPLUNKFORWARDER_RPM }}" + url: "{{ SPLUNKFORWARDER_PACKAGE_URL }}" + register: download_rpm + until: download_rpm is succeeded + retries: 5 + when: ansible_distribution in common_redhat_variants -# Need to start splunk manually so that it can create various files -# and directories that aren't created till the first run and are needed -# to run some of the below commands. 
-- name: start splunk manually - shell: > - {{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt - creates={{splunkforwarder_output_dir}}/var/lib/splunk - when: download_deb.changed - register: started_manually +- name: Install splunk forwarder rpm + shell: "rpm -i /tmp/{{ SPLUNKFORWARDER_RPM }}" + when: ansible_distribution in common_redhat_variants and download_rpm.changed -- name: stop splunk manually - shell: > - {{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt - when: download_deb.changed and started_manually.changed +- name: Create splunk user + user: + name: splunk + createhome: no + state: present + append: yes + groups: syslog,adm + when: download_rpm.changed or download_deb.changed -- name: create boot script - shell: > - {{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt - creates=/etc/init.d/splunk +- name: Create boot script + shell: "{{ splunkforwarder_output_dir }}/bin/splunk enable boot-start -systemd-managed 1 -user splunk --accept-license --answer-yes --no-prompt" + args: + creates: /etc/systemd/system/SplunkForwarder.service register: create_boot_script - when: download_deb.changed - notify: restart splunkforwarder + when: download_rpm.changed or download_deb.changed -# Update credentials -- name: update admin pasword - shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt" - when: download_deb.changed - notify: restart splunkforwarder +- name: Remove old boot script + file: + path: /etc/init.d/splunk + state: absent + when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed -- name: add chkconfig to init script - shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk' - when: download_deb.changed and create_boot_script.changed - notify: restart splunkforwarder +- name: Accept the Splunk license automatically when starting + lineinfile: + dest: /etc/systemd/system/SplunkForwarder.service + regexp: '^ExecStart=' + line: 'ExecStart=/opt/splunkforwarder/bin/splunk _internal_launch_under_systemd --accept-license --no-prompt --answer-yes' + when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed -# Ensure permissions on splunk content -- name: ensure splunk forder permissions - file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk - when: download_deb.changed +- name: reload systemd configuration + command: systemctl daemon-reload + when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed notify: restart splunkforwarder -# Drop template files. 
-- name: drop input configuration
-  template:
-    src=opt/splunkforwarder/etc/system/local/inputs.conf.j2
-    dest=/opt/splunkforwarder/etc/system/local/inputs.conf
-    owner=splunk
-    group=splunk
-    mode=644
+- name: Ensure splunk forwarder permissions
+  file:
+    path: "{{ splunkforwarder_output_dir }}"
+    state: directory
+    recurse: yes
+    owner: splunk
+    group: splunk
+  when: download_rpm.changed or download_deb.changed
   notify: restart splunkforwarder
 
-- name: create outputs config file
+- name: Make sure necessary dirs exist
+  file:
+    path: "{{ item }}"
+    owner: splunk
+    group: splunk
+    state: directory
+  with_items:
+    - "{{ splunkforwarder_output_dir }}/{{ splunkforwarder_ssl_cert_path }}"
+    - /opt/splunkforwarder/etc/system/local
+
+- name: Write SSL certs to disk
+  copy:
+    dest: "{{ splunkforwarder_output_dir }}/{{ splunkforwarder_ssl_cert_path }}/{{ item.target_group }}.pem"
+    content: "{{ item.ssl_cert }}"
+    owner: splunk
+    group: splunk
+    mode: "0400"
+  when: item.ssl_cert is defined
+  with_items: "{{ SPLUNKFORWARDER_SERVERS }}"
+
+- name: Write root CA to disk
+  copy:
+    dest: "{{ splunkforwarder_output_dir }}/{{ splunkforwarder_ssl_cert_path }}/{{ item.target_group }}-rootca.pem"
+    content: "{{ item.ssl_root_ca }}"
+    owner: splunk
+    group: splunk
+    mode: "0400"
+  when: item.ssl_cert is defined
+  with_items: "{{ SPLUNKFORWARDER_SERVERS }}"
+
+- name: Create inputs, outputs and server configuration
   template:
-    src=opt/splunkforwarder/etc/system/local/outputs.conf.j2
-    dest=/opt/splunkforwarder/etc/system/local/outputs.conf
-    owner=splunk
-    group=splunk
-    mode=644
+    src: "opt/splunkforwarder/etc/system/local/{{ item }}.conf.j2"
+    dest: "/opt/splunkforwarder/etc/system/local/{{ item }}.conf"
+    owner: splunk
+    group: splunk
+    mode: "0644"
+  with_items:
+    - inputs
+    - outputs
+    - server
   notify: restart splunkforwarder
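+
+# For reference, inputs.conf.j2 (next in this change) also honors optional
+# per-item keys: blacklist, whitelist, _TCP_ROUTING, followSymlink and crcSalt.
+# An illustrative (hypothetical) SPLUNKFORWARDER_LOG_ITEMS entry:
+#   - source: '{{ COMMON_LOG_DIR }}/tracking'
+#     recursive: true
+#     index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
+#     sourcetype: 'tracking'
+#     whitelist: '\.log$'
+#     blacklist: '\.gz$'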
diff --git a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2 b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2
index 79146941026..e42b7d277c0 100644
--- a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2
+++ b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2
@@ -1,13 +1,37 @@
+# {{ ansible_managed }}
+{% if SPLUNKFORWARDER_HOST_VALUE is defined %}
 [default]
-host = {{ansible_hostname}}
+host = {{ SPLUNKFORWARDER_HOST_VALUE }}
+{% endif %}
 
 {% for loggable in SPLUNKFORWARDER_LOG_ITEMS%}
-[monitor://{{loggable.directory}}]
-recursive = {{loggable.recursive|default(false)}}
+[monitor://{{ loggable.source }}]
+{% if loggable.blacklist is defined %}
+blacklist = {{ loggable.blacklist }}
+{% else %}
+blacklist = ((\.(gz))|\d)$
+{% endif %}
+{% if loggable.recursive | default(False) %}
+{# There's a bug in which "recursive" must be unset for logs to be forwarded #}
+{# See https://answers.splunk.com/answers/420901/splunk-not-matching-files-with-wildcard-in-monitor.html #}
+recursive = true
+{% endif %}
 {% if loggable.sourcetype is defined %}
-sourcetype = {{loggable.sourcetype}}
+sourcetype = {{ loggable.sourcetype }}
 {% endif %}
 {% if loggable.index is defined %}
-index = {{loggable.index}}
+index = {{ loggable.index }}
+{% endif %}
+{% if loggable._TCP_ROUTING is defined %}
+_TCP_ROUTING = {{ loggable._TCP_ROUTING }}
+{% endif %}
+{% if loggable.followSymlink is defined %}
+followSymlink = {{ loggable.followSymlink }}
+{% endif %}
+{% if loggable.crcSalt is defined %}
+crcSalt = {{ loggable.crcSalt }}
+{% endif %}
+{% if loggable.whitelist is defined %}
+whitelist = {{ loggable.whitelist }}
 {% endif %}
 {% endfor %}
diff --git a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/outputs.conf.j2 b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/outputs.conf.j2
index c7d5a1174c4..0b693c484ff 100644
--- a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/outputs.conf.j2
+++ b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/outputs.conf.j2
@@ -1,8 +1,21 @@
-[tcpout]
-defaultGroup = default_output_server
+# {{ ansible_managed }}
 
-[tcpout:default_output_server]
-server = {{SPLUNKFORWARDER_SERVER}}
+{% for server in SPLUNKFORWARDER_SERVERS|selectattr("default", "defined") %}
+[tcpout]
+defaultGroup = {{ server.target_group }}
+{% endfor %}
 
-[tcpout-server://{{SPLUNKFORWARDER_SERVER}}]
+# forwarder receivers
+{% for server in SPLUNKFORWARDER_SERVERS %}
+[tcpout:{{ server.target_group }}]
+server = {{ server.server }}
+compressed = true
+{% if server.ssl_cert is defined %}
+sslCertPath = $SPLUNK_HOME/{{ splunkforwarder_ssl_cert_path }}/{{ server.target_group }}.pem
+sslRootCAPath = $SPLUNK_HOME/{{ splunkforwarder_ssl_cert_path }}/{{ server.target_group }}-rootca.pem
+sslPassword = {{ server.ssl_cert_password }}
+sslVerifyServerCert = true
+sslCommonNameToCheck = {{ server.ssl_common_name }}
+{% endif %}
+{% endfor %}
\ No newline at end of file
diff --git a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/server.conf.j2 b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/server.conf.j2
new file mode 100644
index 00000000000..f4cdbd2bf26
--- /dev/null
+++ b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/server.conf.j2
@@ -0,0 +1,2 @@
+[httpServer]
+disableDefaultPort = true
diff --git a/playbooks/roles/sqlite_fix/defaults/main.yml b/playbooks/roles/sqlite_fix/defaults/main.yml
new file mode 100644
index 00000000000..570647b61e0
--- /dev/null
+++ b/playbooks/roles/sqlite_fix/defaults/main.yml
@@ -0,0 +1,25 @@
+---
+SQLITE_FIX_PYTHON_PATH: "python"
+SQLITE_FIX_TMP_DIR: "/var/tmp/sqlite_fix"
+
+PYSQLITE_URL: "https://codeload.github.com/ghaering/pysqlite/tar.gz/2.8.3"
+PYSQLITE_CREATED_PATH: "pysqlite-2.8.3"
+PYSQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ PYSQLITE_CREATED_PATH }}"
+
+SQLITE_AUTOCONF_URL: "https://www.sqlite.org/2016/sqlite-autoconf-3140100.tar.gz"
+SQLITE_AUTOCONF_CREATED_PATH: "sqlite-autoconf-3140100"
+SQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ SQLITE_AUTOCONF_CREATED_PATH }}"
+
+sqlite_s3_packages:
+  - name: libsqlite3-0_3.14.1-1build1_amd64.deb
+    url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-0_3.14.1-1build1_amd64.deb
+  - name: libsqlite3-0-dbg_3.14.1-1build1_amd64.deb
+    url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-0-dbg_3.14.1-1build1_amd64.deb
+  - name: libsqlite3-dev_3.14.1-1build1_amd64.deb
+    url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-dev_3.14.1-1build1_amd64.deb
+  - name: libsqlite3-dev-dbgsym_3.14.1-1build1_amd64.ddeb
+    url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-dev-dbgsym_3.14.1-1build1_amd64.ddeb
+
+sqlite_s3_packages_focal:
+  - libsqlite3-0
+  - libsqlite3-dev
diff --git a/playbooks/roles/sqlite_fix/tasks/main.yml b/playbooks/roles/sqlite_fix/tasks/main.yml
new file mode 100644
index 00000000000..5052c05c824
--- /dev/null
+++ 
b/playbooks/roles/sqlite_fix/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+- name: Creates directory
+  file:
+    path: "{{ SQLITE_FIX_TMP_DIR }}"
+    state: directory
+    mode: 0775
+  tags:
+    - devstack
+    - devstack:install
+    - install
+
+
+# Tasks to download and upgrade pysqlite to prevent segfaults when testing in devstack
+- name: Download and unzip sqlite autoconf update
+  unarchive:
+    src: "{{ SQLITE_AUTOCONF_URL }}"
+    dest: "{{ SQLITE_FIX_TMP_DIR }}"
+    remote_src: yes
+  tags:
+    - devstack
+    - devstack:install
+    - install
+
+# Python 3 requires a version of sqlite that is not present in the Xenial
+# repositories. Download it from an s3 bucket
+- name: Remove the distro libsqlite3-dev package
+  apt:
+    name: libsqlite3-dev
+    state: absent
+
+- name: Download newer versions of sqlite that are not available in Xenial
+  get_url:
+    dest: "/tmp/{{ item.name }}"
+    url: "{{ item.url }}"
+  with_items: "{{ sqlite_s3_packages }}"
+  when: ansible_distribution_release != "focal"
+
+- name: Install sqlite packages
+  shell: gdebi -nq /tmp/{{ item.name }}
+  with_items: "{{ sqlite_s3_packages }}"
+  when: ansible_distribution_release != "focal"
+
+- name: install package dependencies for focal
+  apt:
+    name: "{{ sqlite_s3_packages_focal }}"
+    update_cache: yes
+    cache_valid_time: "{{ cache_valid_time }}"
+  when: ansible_distribution_release == 'focal'
diff --git a/playbooks/roles/sqoop/defaults/main.yml b/playbooks/roles/sqoop/defaults/main.yml
new file mode 100644
index 00000000000..0ffe6e8f355
--- /dev/null
+++ b/playbooks/roles/sqoop/defaults/main.yml
@@ -0,0 +1,44 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Defaults for role sqoop
+#
+
+SQOOP_VERSION: 1.4.6
+
+# There is no non-alpha version here; this is just the version of sqoop that is compatible with Hadoop 2
+SQOOP_HADOOP_VERSION: 2.0.4-alpha
+SQOOP_MYSQL_CONNECTOR_VERSION: 5.1.29
+SQOOP_HOME: "{{ HADOOP_COMMON_USER_HOME }}/sqoop"
+SQOOP_CONF: "{{ SQOOP_HOME }}/conf"
+SQOOP_LIB: "{{ SQOOP_HOME }}/lib"
+
+#
+# vars are namespaced with the module name.
+#
+sqoop_role_name: sqoop
+sqoop_temporary_dir: /var/tmp
+sqoop_base_filename: "sqoop-{{ SQOOP_VERSION }}.bin__hadoop-{{ SQOOP_HADOOP_VERSION }}"
+sqoop_dist:
+  filename: "{{ sqoop_base_filename }}.tar.gz"
+  url: "http://archive.apache.org/dist/sqoop/%7B%7B%20SQOOP_VERSION%20%7D%7D/%7B%7B%20sqoop_base_filename%20%7D%7D.tar.gz"
+  sha256sum: d582e7968c24ff040365ec49764531cb76dfa22c38add5f57a16a57e70d5d496
+sqoop_mysql_connector_dist:
+  filename: "mysql-connector-java-{{ SQOOP_MYSQL_CONNECTOR_VERSION }}.tar.gz"
+  url: "http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-%7B%7B%20SQOOP_MYSQL_CONNECTOR_VERSION%20%7D%7D.tar.gz"
+  sha256sum: 04ad83b655066b626daaabb9676a00f6b4bc43f0c234cbafafac1209dcf1be73
+
+#
+# OS packages
+#
+
+sqoop_debian_pkgs: []
+
+sqoop_redhat_pkgs: []
diff --git a/playbooks/roles/sqoop/meta/main.yml b/playbooks/roles/sqoop/meta/main.yml
new file mode 100644
index 00000000000..214928bec94
--- /dev/null
+++ b/playbooks/roles/sqoop/meta/main.yml
@@ -0,0 +1,23 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Role includes for role sqoop
+#
+# Example:
+#
+# dependencies:
+#   - {
+#     role: my_role
+#     my_role_var0: "foo"
+#     my_role_var1: "bar"
+#   }
+
+dependencies:
+  - hadoop_common
diff --git a/playbooks/roles/sqoop/tasks/main.yml b/playbooks/roles/sqoop/tasks/main.yml
new file mode 100644
index 00000000000..cb09b5405f1
--- /dev/null
+++ b/playbooks/roles/sqoop/tasks/main.yml
@@ -0,0 +1,105 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role sqoop
+#
+# Overview:
+#
+# Install and configure Sqoop. This tool is used by the analytics stack to take database snapshots.
+#
+# Dependencies:
+#
+# hadoop_common: Sqoop can distribute snapshotting work out to a cluster of workers; it uses Hadoop to do so.
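+#
+# For example (illustrative only; the connection details below are
+# hypothetical), a snapshot can then be taken with:
+#   sqoop import --connect jdbc:mysql://db.example.com/edxapp \
+#     --username read_replica --table auth_user --target-dir /data/auth_user
+#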
+
+- name: Check if downloaded and extracted
+  stat:
+    path: "{{ SQOOP_LIB }}/mysql-connector-java-{{ SQOOP_MYSQL_CONNECTOR_VERSION }}-bin.jar"
+  register: installed
+
+- name: distribution downloaded
+  get_url:
+    url: "{{ sqoop_dist.url }}"
+    sha256sum: "{{ sqoop_dist.sha256sum }}"
+    dest: "{{ sqoop_temporary_dir }}"
+  when: not installed.stat.exists
+
+- name: Distribution extracted
+  unarchive:
+    src: "{{ sqoop_temporary_dir }}/{{ sqoop_dist.filename }}"
+    dest: "{{ HADOOP_COMMON_USER_HOME }}"
+    copy: no
+  when: not installed.stat.exists
+
+- name: Set the permissions
+  shell: chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ sqoop_base_filename }}
+  args:
+    chdir: "{{ HADOOP_COMMON_USER_HOME }}"
+  when: not installed.stat.exists
+
+- name: Versioned directory symlink created
+  file:
+    src: "{{ HADOOP_COMMON_USER_HOME }}/{{ sqoop_base_filename }}"
+    dest: "{{ SQOOP_HOME }}"
+    owner: "{{ hadoop_common_user }}"
+    group: "{{ hadoop_common_group }}"
+    state: link
+
+- name: MySQL connector distribution downloaded
+  get_url:
+    url: "{{ sqoop_mysql_connector_dist.url }}"
+    sha256sum: "{{ sqoop_mysql_connector_dist.sha256sum }}"
+    dest: "{{ sqoop_temporary_dir }}"
+  when: not installed.stat.exists
+
+- name: MySQL connector distribution extracted
+  unarchive:
+    src: "{{ sqoop_temporary_dir }}/{{ sqoop_mysql_connector_dist.filename }}"
+    dest: "{{ sqoop_temporary_dir }}"
+    copy: no
+  when: not installed.stat.exists
+
+- name: Sqoop lib exists
+  file:
+    path: "{{ SQOOP_LIB }}"
+    state: directory
+    owner: "{{ hadoop_common_user }}"
+    group: "{{ hadoop_common_group }}"
+
+# TODO: use the copy module with remote_src: True once we migrate to Ansible 2.x
+- name: MySQL connector installed
+  shell: "cp mysql-connector-java-{{ SQOOP_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ SQOOP_LIB }}"
+  args:
+    chdir: "{{ sqoop_temporary_dir }}/mysql-connector-java-{{ SQOOP_MYSQL_CONNECTOR_VERSION }}"
+  when: not installed.stat.exists
+
+- name: Fix MySQL connector permissions
+  file:
+    path: "{{ SQOOP_LIB }}/mysql-connector-java-{{ SQOOP_MYSQL_CONNECTOR_VERSION }}-bin.jar"
+    owner: "{{ hadoop_common_user }}"
+    group: "{{ hadoop_common_group }}"
+  when: not installed.stat.exists
+
+- name: Configuration installed
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ SQOOP_CONF }}/{{ item }}"
+    mode: "0640"
+    owner: "{{ hadoop_common_user }}"
+    group: "{{ hadoop_common_group }}"
+  with_items:
+    - sqoop-env.sh
+
+- name: env vars sourced in hadoop env
+  lineinfile:
+    dest: "{{ hadoop_common_env }}"
+    state: present
+    regexp: "^. {{ SQOOP_CONF }}/sqoop-env.sh"
+    line: ". {{ SQOOP_CONF }}/sqoop-env.sh"
\ No newline at end of file
diff --git a/playbooks/roles/sqoop/templates/sqoop-env.sh.j2 b/playbooks/roles/sqoop/templates/sqoop-env.sh.j2
new file mode 100644
index 00000000000..eed11a003c5
--- /dev/null
+++ b/playbooks/roles/sqoop/templates/sqoop-env.sh.j2
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+export SQOOP_HOME={{ SQOOP_HOME }}
+export SQOOP_LIB=$SQOOP_HOME/lib
+export PATH=$PATH:$SQOOP_HOME/bin
diff --git a/playbooks/roles/stop_all_edx_services/defaults/main.yml b/playbooks/roles/stop_all_edx_services/defaults/main.yml
index 26157f7ac37..792056a197f 100644
--- a/playbooks/roles/stop_all_edx_services/defaults/main.yml
+++ b/playbooks/roles/stop_all_edx_services/defaults/main.yml
@@ -2,10 +2,10 @@
 #
 # edX Configuration
 #
-# github: https://github.com/edx/configuration
-# wiki: https://github.com/edx/configuration/wiki
-# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
-# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
 #
 ##
 # Defaults for role stop_all_edx_services
@@ -15,3 +15,8 @@
 # vars are namespace with the module name.
 #
 stop_all_edx_services_role_name: stop_all_edx_services
+
+# set this to "--no-wait" if you don't want to wait for all
+# supervisor jobs to finish. Useful when used in conjunction
+# with minos.
+STOP_ALL_EDX_SERVICES_EXTRA_ARGS: ""
diff --git a/playbooks/roles/stop_all_edx_services/handlers/main.yml b/playbooks/roles/stop_all_edx_services/handlers/main.yml
index f80bdabb316..977bdb89b44 100644
--- a/playbooks/roles/stop_all_edx_services/handlers/main.yml
+++ b/playbooks/roles/stop_all_edx_services/handlers/main.yml
@@ -2,10 +2,10 @@
 #
 # edX Configuration
 #
-# github: https://github.com/edx/configuration
-# wiki: https://github.com/edx/configuration/wiki
-# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
-# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
 #
 #
 #
@@ -18,39 +18,49 @@ # an AMI.
# # -- name: stop supervisor - service: name=supervisor state=stopped +- name: supervisorctl_stop_all + shell: /edx/bin/supervisorctl stop all + ignore_errors: True -- name: stop supervisor.devpi - service: name=supervisor.devpi state=stopped +- name: stop supervisor + service: + name: supervisor + state: stopped + arguments: "{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}" - name: stop nginx - service: name=nginx state=stopped + service: + name: nginx + state: stopped + arguments: "{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}" - name: stop rabbitmq-server - service: name=rabbitmq-server state=stopped + service: + name: rabbitmq-server + state: stopped + arguments: "{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}" - name: stop mysql - service: name=mysql state=stopped + service: + name: mysql + state: stopped + arguments: "{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}" - name: stop memcached - service: name=memcached state=stopped - -- name: stop supervisor.devpi - service: name=supervisor.devpi state=stopped - -- name: stop nginx - service: name=nginx state=stopped - -- name: stop rabbitmq-server - service: name=rabbitmq-server state=stopped + service: + name: memcached + state: stopped + arguments: "{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}" - name: stop mongodb - service: name=mongodb state=stopped + service: + name: mongodb + state: stopped + arguments: "{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}" +# Celery and Supervisord should not be killed because they may have long running tasks that need to finish - name: kill processes by user - shell: pkill -u {{ item }} || true + shell: pgrep -u {{ item }} -laf | grep -v celery | grep -v supervisord | grep -v gunicorn | awk '{ print $1}' | xargs -I {} kill {} || true with_items: - - www-data - - devpi.supervisor - - rabbitmq + - www-data + - rabbitmq diff --git a/playbooks/roles/stop_all_edx_services/tasks/main.yml b/playbooks/roles/stop_all_edx_services/tasks/main.yml index e168a33d319..e6934b1f9cf 100644 --- a/playbooks/roles/stop_all_edx_services/tasks/main.yml +++ b/playbooks/roles/stop_all_edx_services/tasks/main.yml @@ -2,10 +2,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -22,41 +22,85 @@ # - stop_all_edx_services # # -- name: stop supervisor - stat: path=/etc/init/supervisor.conf + +# Hermes is not used everywhere, try to stop it if it is running. +# We ignore errors here because of the various states the process +# and machine could be in and because we do not install Hermes +# on everything. 
+- name: stop Hermes if it exists + service: + name: "hermes" + state: stopped + ignore_errors: yes + +- name: supervisorctl stop all + stat: + path: /etc/init/supervisor.conf + register: stat_out + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - supervisorctl_stop_all + +- name: supervisorctl stop all systemd + stat: + path: /etc/systemd/system/supervisor.service + register: stat_out + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - supervisorctl_stop_all + +- name: Get supervisorctl output + shell: "/edx/bin/supervisorctl status" + register: supervisorctl_command_result + ignore_errors: True + +- name: Stop supervisor + stat: + path: /etc/init/supervisor.conf register: stat_out - changed_when: stat_out.stat.exists - notify: stop supervisor + changed_when: stat_out is defined and stat_out.stat.exists and 'RUNNING' not in supervisorctl_command_result.stdout + notify: + - stop supervisor -- name: stop supervisor.devpi - stat: path=/etc/init/supervisor.devpi.conf +- name: Stop supervisor systemd + stat: + path: /etc/systemd/system/supervisor.service register: stat_out - changed_when: stat_out.stat.exists - notify: stop supervisor + changed_when: stat_out is defined and stat_out.stat.exists and 'RUNNING' not in supervisorctl_command_result.stdout + notify: + - stop supervisor -- name: stop nginx - stat: path=/etc/init.d/nginx +- name: Stop nginx + stat: + path: /etc/init.d/nginx register: stat_out - changed_when: stat_out.stat.exists - notify: stop nginx + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - stop nginx -- name: stop rabbitmq-server - stat: path=/etc/init.d/rabbitmq-server +- name: Stop rabbitmq-server + stat: + path: /etc/init.d/rabbitmq-server register: stat_out - changed_when: stat_out.stat.exists - notify: stop rabbitmq-server + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - stop rabbitmq-server -- name: stop memcached - stat: path=/etc/init.d/memcached +- name: Stop memcached + stat: + path: /etc/init.d/memcached register: stat_out - changed_when: stat_out.stat.exists - notify: stop memcached + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - stop memcached -- name: stop mongodb - stat: path=/etc/init.d/mongodb +- name: Stop mongodb + stat: + path: /etc/init.d/mongodb register: stat_out - changed_when: stat_out.stat.exists - notify: stop mongodb + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - stop mongodb - shell: "true" notify: kill processes by user diff --git a/playbooks/roles/supervisor/defaults/main.yml b/playbooks/roles/supervisor/defaults/main.yml index 9da94d4e166..75c00e79943 100644 --- a/playbooks/roles/supervisor/defaults/main.yml +++ b/playbooks/roles/supervisor/defaults/main.yml @@ -1,10 +1,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -19,6 +19,7 @@ SUPERVISOR_HTTP_BIND_IP: '127.0.0.1' supervisor_http_bind_port: '9001' supervisor_app_dir: "{{ COMMON_APP_DIR }}/supervisor" supervisor_cfg_dir: "{{ 
supervisor_app_dir }}/conf.d" +supervisor_available_dir: "{{ supervisor_app_dir }}/conf.available.d" supervisor_data_dir: "{{ COMMON_DATA_DIR }}/supervisor" supervisor_venvs_dir: "{{ supervisor_app_dir }}/venvs" supervisor_venv_dir: "{{ supervisor_venvs_dir }}/supervisor" @@ -34,3 +35,11 @@ supervisor_cfg: "{{ supervisor_app_dir }}/supervisord.conf" # upstart service name and user supervisor_service: supervisor supervisor_service_user: "{{ common_web_user }}" + +SUPERVISOR_VERSION: '4.2.1' + +supervisor_pip_pkgs: + - boto=={{ COMMON_BOTO_VERSION }} + - backoff==1.4.3 + +supervisor_spec: [] diff --git a/playbooks/roles/supervisor/files/pre_supervisor_checks.py b/playbooks/roles/supervisor/files/pre_supervisor_checks.py new file mode 100755 index 00000000000..52b26215d38 --- /dev/null +++ b/playbooks/roles/supervisor/files/pre_supervisor_checks.py @@ -0,0 +1,226 @@ +import argparse +import backoff +import boto.ec2 +from boto.utils import get_instance_metadata, get_instance_identity +from boto.exception import AWSConnectionError +import os +import subprocess +import traceback +import socket +import time + +# Services that should be checked for migrations. +GENERIC_MIGRATION_COMMAND = ". {env_file}; sudo -E -u {user} {python} {code_dir}/manage.py showmigrations" +EDXAPP_MIGRATION_COMMANDS = { + 'lms': "/edx/bin/edxapp-migrate-lms --noinput --list", + 'cms': "/edx/bin/edxapp-migrate-cms --noinput --list", + 'workers': "/edx/bin/edxapp-migrate-cms --noinput --list; /edx/bin/edxapp-migrate-lms --noinput --list", + } +NGINX_ENABLE = { + 'lms': "sudo ln -sf /edx/app/nginx/sites-available/lms /etc/nginx/sites-enabled/lms", + 'cms': "sudo ln -sf /edx/app/nginx/sites-available/cms /etc/nginx/sites-enabled/cms", + } + +# Max amount of time to wait for tags to be applied. +MAX_BACKOFF = 120 +INITIAL_BACKOFF = 1 + +MAX_ATTEMPTS = int(os.environ.get('RETRY_MAX_ATTEMPTS', 5)) + +REGION = get_instance_identity()['document']['region'] + +def services_for_instance(instance_id): + """ + Get the list of all services named by the services tag in this + instance's tags. + """ + ec2 = boto.ec2.connect_to_region(REGION) + reservations = ec2.get_all_instances(instance_ids=[instance_id]) + for reservation in reservations: + for instance in reservation.instances: + if instance.id == instance_id: + try: + services = instance.tags['services'].split(',') + except KeyError as ke: + msg = f"Tag named 'services' not found on this instance({instance_id})" + raise Exception(msg) + + yield from services + +def edp_for_instance(instance_id): + ec2 = boto.ec2.connect_to_region(REGION) + reservations = ec2.get_all_instances(instance_ids=[instance_id]) + for reservation in reservations: + for instance in reservation.instances: + if instance.id == instance_id: + try: + environment = instance.tags['environment'] + deployment = instance.tags['deployment'] + play = instance.tags['play'] + except KeyError as ke: + msg = f"{ke} tag not found on this instance({instance_id})" + raise Exception(msg) + return (environment, deployment, play) + +@backoff.on_exception(backoff.expo, + Exception, + max_tries=MAX_ATTEMPTS) +def check_command_output_with_backoff(cmd): + """ + Run command using subprocess. 
Retry if a non-zero error code is returned + + Arguments: + cmd: string - command to be run via subprocess + + Returns a (byte) string + """ + return subprocess.check_output(cmd, shell=True) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="Enable all services that are in the services tag of this ec2 instance.") + parser.add_argument("-a","--available", + help="The location of the available services.") + parser.add_argument("-e","--enabled", + help="The location of the enabled services.") + + app_migration_args = parser.add_argument_group("app_migrations", + "Args for running app migration checks.") + app_migration_args.add_argument("--check-migrations", action='http://github.com/store_true', + help="Enable checking migrations.") + app_migration_args.add_argument("--check-migrations-service-names", + help="Comma separated list of service names that should be checked for migrations") + app_migration_args.add_argument("--app-python", + help="Path to python to use for executing migration check.") + app_migration_args.add_argument("--app-env", + help="Location of the app environment file.") + app_migration_args.add_argument("--app-code-dir", + help="Location of the app code.") + + args = parser.parse_args() + + report = [] + prefix = None + + instance_id = get_instance_metadata()['instance-id'] + prefix = instance_id + + ec2 = boto.ec2.connect_to_region(REGION) + reservations = ec2.get_all_instances(instance_ids=[instance_id]) + instance = reservations[0].instances[0] + if instance.instance_profile['arn'].endswith('/abbey'): + print("Running an abbey build. Not starting any services.") + # Needs to exit with 1 instead of 0 to prevent + # services from starting. + exit(1) + time_left = MAX_BACKOFF + backoff_seconds = INITIAL_BACKOFF + + environment = None + deployment = None + play = None + while time_left > 0: + try: + environment, deployment, play = edp_for_instance(instance_id) + prefix = "{environment}-{deployment}-{play}-{instance_id}".format( + environment=environment, + deployment=deployment, + play=play, + instance_id=instance_id) + break + except Exception as e: + print(f"Failed to get EDP for {instance_id}: {str(e)}") + # With the time limit being 2 minutes we will + # try 5 times before giving up. + time.sleep(backoff_seconds) + time_left -= backoff_seconds + backoff_seconds = backoff_seconds * 2 + + if environment is None or deployment is None or play is None: + msg = "Unable to retrieve environment, deployment, or play tag." 
+ print(msg) + exit(1) + + # get the hostname of the sandbox + hostname = socket.gethostname() + + ami_id = get_instance_metadata()['ami-id'] + + try: + # get the list of volumes attached to the instance + volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id}) + + for volume in volumes: + volume.add_tags({"hostname": hostname, + "environment": environment, + "deployment": deployment, + "cluster": play, + "instance-id": instance_id, + "ami-id": ami_id, + "created": volume.create_time }) + except Exception as e: + msg = f"Failed to tag volumes associated with {instance_id}: {str(e)}" + print(msg) + + try: + for service in services_for_instance(instance_id): + if service in NGINX_ENABLE: + subprocess.call(NGINX_ENABLE[service], shell=True) + report.append(f"Enabling nginx: {service}") + # We have to reload the new config files + subprocess.call("/bin/systemctl reload nginx", shell=True) + + if (args.check_migrations and + args.app_python is not None and + args.app_env is not None and + args.app_code_dir is not None and + args.check_migrations_service_names is not None and + service in args.check_migrations_service_names.split(',')): + + user = play + # Legacy naming workaround + # Using the play works everywhere but here. + if user == "analyticsapi": + user = "analytics_api" + + cmd_vars = { + 'python': args.app_python, + 'env_file': args.app_env, + 'code_dir': args.app_code_dir, + 'service': service, + 'user': user, + } + cmd = GENERIC_MIGRATION_COMMAND.format(**cmd_vars) + if service in EDXAPP_MIGRATION_COMMANDS: + cmd = EDXAPP_MIGRATION_COMMANDS[service] + + if os.path.exists(cmd_vars['code_dir']): + os.chdir(cmd_vars['code_dir']) + # Run migration check command. + output = check_command_output_with_backoff(cmd) + if b'[ ]' in output: + raise Exception(f"Migrations have not been run for {service}") + else: + report.append(f"Checked migrations: {service}") + + # Link to available service. 
+ available_file = os.path.join(args.available, f"{service}.conf") + link_location = os.path.join(args.enabled, f"{service}.conf") + if os.path.exists(available_file): + subprocess.call(f"sudo -u supervisor ln -sf {available_file} {link_location}", shell=True) + report.append(f"Enabling service: {service}") + else: + raise Exception(f"No conf available for service: {link_location}") + + except AWSConnectionError as ae: + msg = f"{prefix}: ERROR : {ae}" + raise ae + except Exception as e: + msg = f"{prefix}: ERROR : {e}" + print(msg) + traceback.print_exc() + raise e + else: + msg = "{}: {}".format(prefix, " | ".join(report)) + print(msg) diff --git a/playbooks/roles/supervisor/tasks/main.yml b/playbooks/roles/supervisor/tasks/main.yml index 672a99f203d..b0e83e723b3 100644 --- a/playbooks/roles/supervisor/tasks/main.yml +++ b/playbooks/roles/supervisor/tasks/main.yml @@ -1,10 +1,10 @@ # # edX Configuration # -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT # # # @@ -50,83 +50,192 @@ # supervisor_service: upstart-service-name # --- -- name: create application user - user: > - name="{{ supervisor_user }}" - createhome=no - shell=/bin/false - -- name: create supervisor service user - user: > - name="{{ supervisor_service_user }}" - createhome=no - shell=/bin/false - -- name: create supervisor directories - file: > - name={{ item }} - state=directory - owner={{ supervisor_user }} - group={{ supervisor_service_user }} +- name: Create application and supervisor service user + user: + name: "{{ item }}" + createhome: no + shell: /bin/false + with_items: + - "{{ supervisor_user }}" + - "{{ supervisor_service_user }}" + tags: + - install + - install:base + +- name: Create supervisor and service user accessible directories + file: + path: "{{ item }}" + state: directory + owner: "{{ supervisor_user }}" + group: "{{ supervisor_service_user }}" + mode: "0755" with_items: - "{{ supervisor_app_dir }}" - "{{ supervisor_venv_dir }}" - "{{ supervisor_cfg_dir }}" + - "{{ supervisor_available_dir }}" + tags: + - install + - install:base -- name: create supervisor directories - file: > - name={{ item }} - state=directory - owner={{ supervisor_service_user }} - group={{ supervisor_user }} +- name: Create supervisor directories + file: + path: "{{ item }}" + state: directory + owner: "{{ supervisor_service_user }}" + group: "{{ supervisor_user }}" with_items: - "{{ supervisor_data_dir }}" - "{{ supervisor_log_dir }}" + tags: + - install + - install:base + +- name: Install supervisor in its venv + pip: + name: supervisor + version: "{{ SUPERVISOR_VERSION }}" + virtualenv: "{{ supervisor_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + become_user: "{{ supervisor_user }}" + tags: + - install + - install:base +- name: Install supervisor pip packages in its venv + pip: + name: "{{ supervisor_pip_pkgs }}" + virtualenv: "{{ supervisor_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + become_user: "{{ supervisor_user }}" + tags: + - install + - install:base -- name: install supervisor in its 
venv - pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present - sudo_user: "{{ supervisor_user }}" + # 14.04 +- name: Create supervisor upstart job + template: + src: "etc/init/supervisor-upstart.conf.j2" + dest: "/etc/init/{{ supervisor_service }}.conf" + owner: root + group: root + when: ansible_distribution_release == 'trusty' + tags: + - install + - install:base -- name: create supervisor upstart job - template: > - src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf - owner=root group=root +# NB: with systemd, pre_supervisor is a pre-task for supervisor, not a separate service +- name: Create supervisor systemd job + template: + src: "etc/init/supervisor-systemd.service.j2" + dest: "/etc/systemd/system/{{ supervisor_service }}.service" + owner: root + group: root + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal' + tags: + - install + - install:base -- name: create supervisor master config - template: > - src=supervisord.conf.j2 dest={{ supervisor_cfg }} - owner={{ supervisor_user }} group={{ supervisor_service_user }} - mode=0644 +- name: Write the pre_supervisor python script + copy: + src: pre_supervisor_checks.py + dest: "{{ supervisor_app_dir }}/pre_supervisor_checks.py" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_service_user }}" + mode: "0750" + when: disable_edx_services + tags: + - to-remove + - aws-specific -- name: create a symlink for supervisortctl - file: > - src={{ supervisor_ctl }} - dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }} - state=link +- name: Create supervisor master config + template: + src: "edx/app/supervisor/supervisord.conf.j2" + dest: "{{ supervisor_cfg }}" + owner: "{{ supervisor_user }}" + group: "{{ supervisor_service_user }}" + mode: "0644" + tags: + - install + - install:configuration + +# this link is deprecated in favor of the shell wrapper +- name: Remove a symlink for supervisorctl + file: + # src: "{{ supervisor_ctl }}" + dest: "{{ COMMON_BIN_DIR }}/{{ supervisor_ctl | basename }}" + state: absent when: supervisor_service == "supervisor" + tags: + - install + - install:configuration -- name: create a symlink for supervisor cfg - file: > - src={{ item }} - dest={{ COMMON_CFG_DIR }}/{{ item|basename }} - state=link +# these links are deprecated in favor of the shell wrapper +- name: Remove symlink for supervisor cfg + file: + # src: "{{ item }}" + dest: "{{ COMMON_CFG_DIR }}/{{ item | basename }}" + state: absent when: supervisor_service == "supervisor" with_items: - - "{{ supervisor_cfg }}" - - "{{ supervisor_cfg_dir }}" + - "{{ supervisor_cfg }}" + - "{{ supervisor_cfg_dir }}" + tags: + - install + - install:configuration + +- name: Create helper script for running supervisor + template: + src: "edx/bin/supervisorctl.j2" + dest: "{{ COMMON_BIN_DIR }}/supervisorctl" + owner: "{{ supervisor_service_user }}" + mode: "0755" + when: supervisor_service == "supervisor" + tags: + - install + - install:configuration + + # This command and the subsequent check in the when condition are related + # to this bug: https://github.com/ansible/ansible-modules-core/issues/593 +- name: Are we in a Docker container + shell: echo $(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + tags: + - install + - install:base + +- name: Enable supervisor to start on boot + service: + name: "{{ supervisor_service }}.service" + enabled: yes + when: 
(ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal') and docker_container.stdout != 'yes' + tags: + - install + - install:base -- name: start supervisor - service: > - name={{supervisor_service}} - state=started +- name: Start supervisor + service: + name: "{{ supervisor_service }}" + state: started register: start_supervisor + when: not disable_edx_services + tags: + - manage + - manage:start # calling update on supervisor too soon after it # starts will result in an error. -- name: wait for web port to be available - wait_for: port={{ supervisor_http_bind_port }} timeout=5 +- name: Wait for web port to be available + wait_for: + port: "{{ supervisor_http_bind_port }}" + timeout: 5 when: start_supervisor.changed + tags: + - manage + - manage:start # call supervisorctl update every time, this reloads # the supervisorctl config @@ -134,7 +243,11 @@ # we don't use notifications for supervisor because # they don't work well with parameterized roles. # See https://github.com/ansible/ansible/issues/4853 -- name: update supervisor configuration +- name: Update supervisor configuration shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services register: supervisor_update - changed_when: supervisor_update.stdout != "" + changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" + tags: + - manage + - manage:start diff --git a/playbooks/roles/supervisor/templates/supervisord.conf.j2 b/playbooks/roles/supervisor/templates/edx/app/supervisor/supervisord.conf.j2 similarity index 100% rename from playbooks/roles/supervisor/templates/supervisord.conf.j2 rename to playbooks/roles/supervisor/templates/edx/app/supervisor/supervisord.conf.j2 diff --git a/playbooks/roles/supervisor/templates/edx/bin/supervisorctl.j2 b/playbooks/roles/supervisor/templates/edx/bin/supervisorctl.j2 new file mode 100644 index 00000000000..fe84543c2f2 --- /dev/null +++ b/playbooks/roles/supervisor/templates/edx/bin/supervisorctl.j2 @@ -0,0 +1,2 @@ +#!/bin/bash +sudo -u {{ supervisor_service_user }} {{ supervisor_ctl }} -c {{ supervisor_cfg }} "$@" diff --git a/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 b/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 new file mode 100644 index 00000000000..3907b30810d --- /dev/null +++ b/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 @@ -0,0 +1,40 @@ +[Unit] +Description=supervisord - Supervisor process control system +Documentation=http://supervisord.org +After=network.target + + +[Service] +{% if disable_edx_services and not devstack -%} +# Run pre_supervisor +ExecStartPre={{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py \ + {% for item in supervisor_spec -%} + {%- if item.code -%} + --check-migrations --check-migrations-service-names {{ item.migration_check_services }} --app-python {{ item.python }} --app-code-dir {{ item.code }} + {%- if item.env is defined %} --app-env {{ item.env }}{% endif %} \ + {% endif %} + {%- endfor -%} + --available={{ supervisor_available_dir }} --enabled={{ supervisor_cfg_dir }} +{% endif %} + +# User will be applied only to ExecStart, not other commands (i.e. ExecStartPre) +# This is needed because pre_supervisor needs to write to supervisor/conf.d, which +# supervisor_service_user does not have permission to do. 
+PermissionsStartOnly=true +User={{ supervisor_service_user }} + +Type=forking +TimeoutSec=432000 + +ExecStart={{ supervisor_venv_dir }}/bin/supervisord --configuration {{ supervisor_cfg }} +ExecReload={{ supervisor_venv_dir }}/bin/supervisorctl reload +ExecStop={{ supervisor_venv_dir }}/bin/supervisorctl shutdown + +# Trust supervisor to kill all its children +# Otherwise systemd will see that ExecStop ^ comes back synchronously and say "Oh, I can kill everyone in this cgroup" +# https://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStop= +# https://www.freedesktop.org/software/systemd/man/systemd.kill.html +KillMode=none + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/supervisor/templates/etc/init/supervisor-upstart.conf.j2 b/playbooks/roles/supervisor/templates/etc/init/supervisor-upstart.conf.j2 new file mode 100644 index 00000000000..9bb82e91e48 --- /dev/null +++ b/playbooks/roles/supervisor/templates/etc/init/supervisor-upstart.conf.j2 @@ -0,0 +1,13 @@ +description "supervisord" + +{% if disable_edx_services and not devstack -%} + start on stopped pre_supervisor +{% else %} +start on runlevel [2345] +{% endif %} +stop on runlevel [!2345] + +kill timeout 432000 + +setuid {{ supervisor_service_user }} +exec {{ supervisor_venv_dir }}/bin/supervisord -n --configuration {{ supervisor_cfg }} diff --git a/playbooks/roles/supervisor/templates/supervisor-upstart.conf.j2 b/playbooks/roles/supervisor/templates/supervisor-upstart.conf.j2 deleted file mode 100644 index d83e2de6054..00000000000 --- a/playbooks/roles/supervisor/templates/supervisor-upstart.conf.j2 +++ /dev/null @@ -1,7 +0,0 @@ -description "supervisord" - -start on runlevel [2345] -stop on runlevel [!2345] - -setuid {{ supervisor_service_user }} -exec {{ supervisor_venv_dir }}/bin/supervisord --nodaemon --configuration {{ supervisor_cfg }} diff --git a/playbooks/roles/swapfile/LICENSE b/playbooks/roles/swapfile/LICENSE new file mode 100644 index 00000000000..c858c5132ee --- /dev/null +++ b/playbooks/roles/swapfile/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Kamal Nasser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/playbooks/roles/swapfile/README.rst b/playbooks/roles/swapfile/README.rst new file mode 100644 index 00000000000..952b1f269c5 --- /dev/null +++ b/playbooks/roles/swapfile/README.rst @@ -0,0 +1,31 @@ +swapfile +######## + +Creates and enables a swap file. 
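A minimal usage sketch for this role, not part of the upstream patch; the host pattern and 4GB size are illustrative, and the tunables are the ones defined in the role's defaults/main.yml later in this changeset:

- hosts: all
  become: true
  roles:
    - role: swapfile
      SWAPFILE_SIZE: 4GB        # MB or GB suffix, per defaults/main.yml
      SWAPFILE_SWAPPINESS: 10   # optional vm.swappiness override
      SWAPFILE_USE_DD: False    # set True if the filesystem lacks fallocate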
+ +Slightly modified from https://github.com/kamaln7/ansible-swapfile + +License +******* + +The MIT License (MIT) + +Copyright (c) 2014 Kamal Nasser hello@kamal.io + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/playbooks/roles/swapfile/defaults/main.yml b/playbooks/roles/swapfile/defaults/main.yml new file mode 100644 index 00000000000..423f633a93d --- /dev/null +++ b/playbooks/roles/swapfile/defaults/main.yml @@ -0,0 +1,22 @@ +--- +# Size of the desired swap file. Use a MB or GB suffix. +SWAPFILE_SIZE: 2GB +# SWAPFILE_LOCATION: Location of the swap file managed by this role. +SWAPFILE_LOCATION: /swapfile +# SWAPFILE_USE_DD: set this to True if your filesystem does not support +# fallocate (e.g. if you use ext3). dd will then be used instead of fallocate. +SWAPFILE_USE_DD: False + +# +# Advanced, optional settings: +# + +# SWAPFILE_SWAPPINESS: Update sysctl.conf to set the swappiness percentage +# (vm.swappiness) -- the lower it is, the less your system swaps memory pages. +# If this is False (default), no change will be made. +SWAPFILE_SWAPPINESS: False +# SWAPFILE_VFS_CACHE_PRESSURE: Update sysctl.conf to set the VFS cache pressure. +# "this percentage value controls the tendency of the kernel to reclaim the +# memory which is used for caching of directory and inode objects." +# If this is False (default), no change will be made. 
+SWAPFILE_VFS_CACHE_PRESSURE: False diff --git a/playbooks/roles/swapfile/handlers/main.yml b/playbooks/roles/swapfile/handlers/main.yml new file mode 100644 index 00000000000..2c5a7b1c568 --- /dev/null +++ b/playbooks/roles/swapfile/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: reload sysctl + command: sysctl -p diff --git a/playbooks/roles/swapfile/meta/main.yml b/playbooks/roles/swapfile/meta/main.yml new file mode 100644 index 00000000000..3c2d0452d32 --- /dev/null +++ b/playbooks/roles/swapfile/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + author: "Kamal Nasser" + description: swapfile + license: MIT + min_ansible_version: 1.4 + version: 0.4 + categories: + - system + dependencies: [] diff --git a/playbooks/roles/swapfile/tasks/main.yml b/playbooks/roles/swapfile/tasks/main.yml new file mode 100644 index 00000000000..abca4ac9774 --- /dev/null +++ b/playbooks/roles/swapfile/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Write swapfile + command: | + {% if SWAPFILE_USE_DD %} + dd if=/dev/zero of={{ SWAPFILE_LOCATION }} bs=1M count={{ SWAPFILE_SIZE }} creates={{ SWAPFILE_LOCATION }} + {% else %} + fallocate -l {{ SWAPFILE_SIZE }} {{ SWAPFILE_LOCATION }} creates={{ SWAPFILE_LOCATION }} + {% endif %} + register: write_swapfile + when: SWAPFILE_SIZE != false + +- name: Set swapfile permissions + file: path={{ SWAPFILE_LOCATION }} mode=600 + when: SWAPFILE_SIZE != false + +- name: Create swapfile + command: mkswap {{ SWAPFILE_LOCATION }} + register: create_swapfile + when: SWAPFILE_SIZE != false and write_swapfile.changed + +- name: Enable swapfile + command: swapon {{ SWAPFILE_LOCATION }} + when: SWAPFILE_SIZE != false and create_swapfile.changed + +- name: Add swapfile to /etc/fstab + mount: name=none src={{ SWAPFILE_LOCATION }} fstype=swap opts=sw passno=0 dump=0 state=present + when: SWAPFILE_SIZE != false + +- name: Configure vm.swappiness + lineinfile: dest=/etc/sysctl.conf line="vm.swappiness = {{ SWAPFILE_SWAPPINESS }}" regexp="^vm.swappiness[\s]?=" state=present + notify: reload sysctl + when: SWAPFILE_SWAPPINESS != false + +- name: Configure vm.vfs_cache_pressure + lineinfile: dest=/etc/sysctl.conf line="vm.vfs_cache_pressure = {{ SWAPFILE_VFS_CACHE_PRESSURE }}" regexp="^vm.vfs_cache_pressure[\s]?=" state=present + notify: reload sysctl + when: SWAPFILE_VFS_CACHE_PRESSURE != false diff --git a/playbooks/roles/tableau/defaults/main.yml b/playbooks/roles/tableau/defaults/main.yml new file mode 100644 index 00000000000..f58df8ee5e0 --- /dev/null +++ b/playbooks/roles/tableau/defaults/main.yml @@ -0,0 +1,55 @@ +# variables common to the tableau role, automatically loaded +# when the role is included +--- + +# Variables in all caps are environment specific +# Lowercase variables are internal to the role +# +# Defaults specified here should not contain +# any secrets or host identifying information. 
+# + +tableau_data_dir: "{{ COMMON_DATA_DIR }}/tableau" +tableau_app_dir: "{{ COMMON_APP_DIR }}/tableau" +tableau_log_dir: "{{ COMMON_LOG_DIR }}/tableau" +tableau_code_dir: "{{ tableau_app_dir }}/tableau" +tableau_installer_dir: "{{ tableau_code_dir }}/linux/automated-installer" + +TABLEAU_ADMIN_USER: tableau +TABLEAU_ADMIN_PASSWORD: 'tableau001-pass' +tableau_user_group: "tableau" +tableau_user: "{{ TABLEAU_ADMIN_USER }}" +tableau_user_createhome: 'yes' +tableau_user_shell: '/bin/false' + +tableau_repo: "https://github.com/tableau/server-install-script-samples.git" +tableau_version: 'master' + +tableau_server_deb_pkg: 'https://downloads.tableau.com/esdalt/2021.3.2/tableau-server-2021-3-2_amd64.deb' +tableau_server_deb_pkg_name: 'tableau-server-amd64.deb' + +TABLEAU_SERVER_ADMIN_USER: "tableau001" +TABLEAU_SERVER_ADMIN_PASSWORD: "tableau001-pass" + +TABLEAU_REGISTRATION_CONFIG_USER_FIRST_NAME: "Mike" +TABLEAU_REGISTRATION_CONFIG_USER_LAST_NAME: "OConnell" +TABLEAU_REGISTRATION_CONFIG_USER_TITLE: "Engineering Manager" +TABLEAU_REGISTRATION_CONFIG_USER_EMAIL: "moconnell@edx.org" +TABLEAU_REGISTRATION_CONFIG_USER_PHONE: "" + +tableau_registration_config: + zip: "02141" + country: "USA" + city: "Cambridge" + last_name: "{{ TABLEAU_REGISTRATION_CONFIG_USER_LAST_NAME }}" + industry: "Engineering" + eula: "Yes" + title: "{{ TABLEAU_REGISTRATION_CONFIG_USER_TITLE }}" + phone: "{{ TABLEAU_REGISTRATION_CONFIG_USER_PHONE }}" + company: "edX, Inc" + state: "MA" + department: "Enterprise" + first_name: "{{ TABLEAU_REGISTRATION_CONFIG_USER_FIRST_NAME }}" + email: "{{ TABLEAU_REGISTRATION_CONFIG_USER_EMAIL }}" + +TABLEAU_LICENSE_KEY: "" diff --git a/playbooks/roles/tableau/meta/main.yml b/playbooks/roles/tableau/meta/main.yml new file mode 100644 index 00000000000..2083f0e1251 --- /dev/null +++ b/playbooks/roles/tableau/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/playbooks/roles/tableau/tasks/main.yml b/playbooks/roles/tableau/tasks/main.yml new file mode 100644 index 00000000000..32925860a55 --- /dev/null +++ b/playbooks/roles/tableau/tasks/main.yml @@ -0,0 +1,129 @@ +# requires: +# - group_vars/all +# - common/tasks/main.yml +--- + +- name: create main group for tableau's default user + group: + name: "{{ tableau_user_group }}" + state: "present" + tags: + - install + - install:base + +- name: create application user + user: + name: "{{ tableau_user }}" + home: "{{ tableau_app_dir }}" + createhome: "{{ tableau_user_createhome }}" + shell: "{{ tableau_user_shell }}" + group: "{{ tableau_user_group }}" + groups: sudo + password: "{{ TABLEAU_ADMIN_PASSWORD | password_hash('sha512') }}" + tags: + - install + - install:base + +- name: create tableau user dirs + file: + path: "{{ item.path }}" + state: directory + owner: "{{ tableau_user }}" + group: "{{ tableau_user_group }}" + mode: "{{ item.mode | default(0755) }}" + with_items: + - { path: "{{ tableau_app_dir }}" } + # needed for the ansible 1.5 git module + - { path: "{{ tableau_app_dir }}/.ssh" } + - { path: "{{ tableau_data_dir }}" } + tags: + - install + - install:base + +- name: create tableau log dir + file: + path: "{{ tableau_log_dir }}" + state: directory + owner: "{{ common_log_user }}" + group: "{{ common_log_user }}" + tags: + - install + - install:base + +- name: set git fetch.prune to ignore deleted remote refs + shell: git config --global fetch.prune true + become_user: "{{ tableau_user }}" + tags: + - install + - install:base + +# Clone tableau repo containing install scripts. 
+- name: checkout tableau repo into {{ tableau_code_dir }} + git: + dest: "{{ tableau_code_dir }}" + repo: "{{ tableau_repo }}" + version: "{{ tableau_version }}" + accept_hostkey: yes + force: yes + become_user: "{{ tableau_user }}" + register: tableau_checkout + tags: + - install + - install:code + +- name: git clean after checking out tableau repo + shell: cd {{ tableau_code_dir }} && git clean -xdf + become_user: "{{ tableau_user }}" + tags: + - install + - install:code + +- name: Download tableau server + get_url: + url: "{{ tableau_server_deb_pkg }}" + dest: "{{ tableau_installer_dir }}/{{ tableau_server_deb_pkg_name }}" + become_user: "{{ tableau_user }}" + tags: + - install + - install:tableau-server + +- name: copy the template to the desired location + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode }}" + with_items: + - { src: 'secrets.j2', dest: '{{ tableau_installer_dir }}/secrets', owner: '{{ tableau_user }}', group: '{{ common_web_user }}', mode: '0644' } + - { src: 'registration.json.j2', dest: '{{ tableau_installer_dir }}/registration.json', owner: '{{ tableau_user }}', group: '{{ common_web_user }}', mode: '0644' } + tags: + - install + - install:base + +- name: Make automated installer executable + file: + dest: "{{ tableau_installer_dir }}/automated-installer" + mode: "a+x" + become_user: "{{ tableau_user }}" + tags: + - install + - install:code + +- name: Ensure "{{ tableau_user }}" is in the sudoers file. + lineinfile: + path: "/etc/sudoers" + state: "present" + regexp: '^%{{ tableau_user }} ALL=' + line: '%{{ tableau_user }} ALL=(ALL) NOPASSWD: ALL' + become: true + tags: + - install + - install:code + +- name: Run automated installer to actually set up the tableau server + shell: "sudo {{ tableau_installer_dir }}/automated-installer -k '{{ TABLEAU_LICENSE_KEY }}' -s {{ tableau_installer_dir }}/secrets -f {{ tableau_installer_dir }}/config.json -r {{ tableau_installer_dir }}/registration.json --accepteula {{ tableau_installer_dir }}/{{ tableau_server_deb_pkg_name }}" + become_user: "{{ tableau_user }}" + tags: + - install + - install:code diff --git a/playbooks/roles/tableau/templates/registration.json.j2 b/playbooks/roles/tableau/templates/registration.json.j2 new file mode 100644 index 00000000000..ad432eb7910 --- /dev/null +++ b/playbooks/roles/tableau/templates/registration.json.j2 @@ -0,0 +1 @@ +{{ tableau_registration_config | to_nice_json }} diff --git a/playbooks/roles/tableau/templates/secrets.j2 b/playbooks/roles/tableau/templates/secrets.j2 new file mode 100644 index 00000000000..83c1e2f4cd1 --- /dev/null +++ b/playbooks/roles/tableau/templates/secrets.j2 @@ -0,0 +1,21 @@ +# Note: If you do not enter the tsm_admin_pass or the +# tableau_server_admin_pass in this file, you will be prompted to enter this +# information during installation. However, you must enter the account names +# for tsm_admin_user and tableau_server_admin_user. +# Credentials for the account that is running the automated-installer script. +# This account will be added to the 'tsmadmin' group. The 'tsmadmin' group is +# created during the Tableau installation process. Members of the 'tsmadmin' +# group can run TSM commands. +# +tsm_admin_user="{{ TABLEAU_ADMIN_USER }}" +tsm_admin_pass="{{ TABLEAU_ADMIN_PASSWORD }}" + +# Enter a username and password to create the initial Tableau administrator +# account. 
This account will be created in Tableau Server by the installation +# process and will have Tableau Server administrator rights. The user account +# will be local to Tableau Server and will not be a Linux OS account. If you +# are using LDAP or AD for authentication, then the account you specify for +# the Tableau administrator must be a valid account from the directory service. +# +tableau_server_admin_user="{{ TABLEAU_SERVER_ADMIN_USER }}" +tableau_server_admin_pass="{{ TABLEAU_SERVER_ADMIN_PASSWORD }}" diff --git a/playbooks/roles/tableau_de/defaults/main.yml b/playbooks/roles/tableau_de/defaults/main.yml new file mode 100644 index 00000000000..b6f12ac53f3 --- /dev/null +++ b/playbooks/roles/tableau_de/defaults/main.yml @@ -0,0 +1,25 @@ +# variables common to the tableau_de role, automatically loaded +# when the role is included +--- + +# +# Defaults specified here should not contain +# any secrets or host identifying information. +# + +tableau_user_group: "tableau" +tableau_user: "{{ TABLEAU_ADMIN_USER }}" +tsm_admin_user: "{{ TABLEAU_SERVER_ADMIN_USER }}" +tableau_cron_dir: "/etc/cron" + +os_default_user: "ubuntu" +centos_default_user: "centos" + +# Snowflake driver parameters +snowflake_odbc_driver_endpoint: "https://sfc-repo.snowflakecomputing.com/odbc/linux" +snowflake_driver_version: "2.24.2" + +# Postgres driver parameters +postgre_driver_endpoint: "https://downloads.tableau.com/drivers/linux/postgresql" +postgre_driver_version: "42.2.22" + diff --git a/playbooks/roles/tableau_de/meta/main.yml b/playbooks/roles/tableau_de/meta/main.yml new file mode 100644 index 00000000000..202420c7af1 --- /dev/null +++ b/playbooks/roles/tableau_de/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - aws_cloudwatch_agent \ No newline at end of file diff --git a/playbooks/roles/tableau_de/tasks/main.yml b/playbooks/roles/tableau_de/tasks/main.yml new file mode 100644 index 00000000000..7957c9c63ff --- /dev/null +++ b/playbooks/roles/tableau_de/tasks/main.yml @@ -0,0 +1,102 @@ +--- + +- name: Install AWS CLI + apt: + name: awscli + state: present + tags: + - install + - install:base + +- name: Add OS default user to tableau group + shell: "usermod -a -G {{ tableau_user_group }} {{ os_default_user }}" + become: yes + become_method: sudo + tags: + - install + - install:base + +- name: Enable tableau user shell + shell: "usermod -s /bin/bash {{ tableau_user }}" + become: yes + become_method: sudo + tags: + - install + - install:base + +- name: Create directory for Postgres driver if it does not exist + file: + path: /opt/tableau/tableau_driver/jdbc + state: directory + mode: '0777' + tags: + - install + - install:base + +- name: Download Postgres driver jar file + get_url: + url: "{{postgre_driver_endpoint}}/postgresql-{{postgre_driver_version}}.jar" + dest: /opt/tableau/tableau_driver/jdbc/postgresql-{{postgre_driver_version}}.jar + become_user: "{{ tableau_user }}" + tags: + - install + - install:base + +- name: Download Snowflake odbc driver + get_url: + url: "{{snowflake_odbc_driver_endpoint}}/{{snowflake_driver_version}}/snowflake-odbc-{{snowflake_driver_version}}.x86_64.deb" + dest: /tmp/snowflake-odbc-{{snowflake_driver_version}}.x86_64.deb + tags: + - install + - install:base + +- name: Install Snowflake odbc driver + apt: + deb: /tmp/snowflake-odbc-{{snowflake_driver_version}}.x86_64.deb + tags: + - install + - install:base + +# Add the jenkins user's ssh public key to the running user's authorized keys +# This is needed so that this jenkins instance 
can be used to update system users +- name: Add the jenkins user's ssh public key to the running user's authorized keys + lineinfile: + path: /home/{{ ansible_ssh_user }}/.ssh/authorized_keys + create: yes + line: "{{ lookup('file', jenkins_ssh_public_keyfile) }}" + when: jenkins_ssh_public_keyfile is defined and jenkins_ssh_public_keyfile + tags: + - ssh + - ssh:keys + +- name: Create TSM Service Status Script Directory + file: + path: "{{ tableau_cron_dir }}" + state: directory + mode: "0755" + owner: "{{ centos_default_user }}" + group: "{{ tableau_user_group }}" + tags: + - install + - install:cron + +- name: Set up TSM Service Status Script + template: + src: "tsm_status.sh.j2" + dest: "{{ tableau_cron_dir }}/tsm_status.sh" + mode: 0775 + owner: "{{ centos_default_user }}" + group: "{{ tableau_user_group }}" + tags: + - install + - install:cron + +- name: Create Cron Job to collect TSM status metrics every 5 minutes + cron: + name: Check TSM Status and report metric to CloudWatch + minute: "*/5" + user: "{{ centos_default_user }}" + job: "{{ tableau_cron_dir }}/tsm_status.sh" + tags: + - install + - install:cron diff --git a/playbooks/roles/tableau_de/templates/tsm_status.sh.j2 b/playbooks/roles/tableau_de/templates/tsm_status.sh.j2 new file mode 100644 index 00000000000..5dd6d62e60b --- /dev/null +++ b/playbooks/roles/tableau_de/templates/tsm_status.sh.j2 @@ -0,0 +1,16 @@ +#!/bin/bash + +# Correctly installed aws_cli lives under /usr/local/bin +export PATH="/usr/local/bin:${PATH}" +RUNNING_STATUS="RUNNING" + +TSM_STATUS=$(sudo runuser -u {{ tsm_admin_user }} tsm status) + +if [[ $TSM_STATUS == *"$RUNNING_STATUS"* ]] +then + # TSM status is RUNNING, sending metric value '1' + aws cloudwatch put-metric-data --metric-name tableau_tsm_service_running --namespace AWS/EC2 --value 1 --region us-east-1 +else + # TSM Status is not RUNNING, sending metric value '0' + aws cloudwatch put-metric-data --metric-name tableau_tsm_service_running --namespace AWS/EC2 --value 0 --region us-east-1 +fi diff --git a/playbooks/roles/test_build_server/defaults/main.yml b/playbooks/roles/test_build_server/defaults/main.yml new file mode 100644 index 00000000000..300843ef990 --- /dev/null +++ b/playbooks/roles/test_build_server/defaults/main.yml @@ -0,0 +1,19 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role test_build_server +# + +# +# vars are namespace with the module name. +# +test_build_server_user: jenkins +test_build_server_repo_path: /home/jenkins +TEST_EDX_PLATFORM_VERSION: master diff --git a/playbooks/roles/test_build_server/files/test-development-environment.sh b/playbooks/roles/test_build_server/files/test-development-environment.sh new file mode 100755 index 00000000000..03b409e725c --- /dev/null +++ b/playbooks/roles/test_build_server/files/test-development-environment.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +set -e +################################################################################ +# This executes a small subset of the edx-platform tests. It is intended as +# a means of testing newly provisioned AMIs for our jenkins workers. +# +# The two main things that happen here: +# 1. 
The setup from edx-platform/scripts/all-tests.sh, the script that is +# run by the jenkins workers to kick off tests. +# 2. The paver commands for tests, coverage and quality reports are run. +# For the tests, it runs only a small number of test cases for each +# test suite. +############################################################################### + +# Doing this rather than copying the file into the scripts folder so that +# this file doesn't get cleaned out by the 'git clean' in all-tests.sh. +cd edx-platform-clone + +# This will run all of the setup it usually runs, but none of the +# tests because TEST_SUITE isn't defined. +export PYTHON_VERSION=3.8 +source scripts/jenkins-common.sh + +case "$1" in + "unit") + + # Now we can run a subset of the tests via paver. + # Run some of the common/lib unit tests + paver test_lib -t common/lib/xmodule/xmodule/tests/test_stringify.py + + # Generate some coverage reports + # Since `TEST_SUITE` is not set, change the coverage file written by the + # previous test to a generic one. + cp reports/common_lib_xmodule.coverage reports/.coverage + paver coverage + + # Run some of the djangoapp unit tests + paver test_system -t lms/djangoapps/courseware/tests/tests.py + paver test_system -t cms/djangoapps/course_creators/tests/test_views.py + ;; + + "js") + + # Run some of the javascript unit tests + paver test_js_run -s lms-coffee + ;; + + "quality") + # Generate quality reports + paver run_quality + ;; + + *) + echo "args required" + exit 1 +esac diff --git a/playbooks/roles/test_build_server/meta/main.yml b/playbooks/roles/test_build_server/meta/main.yml new file mode 100644 index 00000000000..39c023632c8 --- /dev/null +++ b/playbooks/roles/test_build_server/meta/main.yml @@ -0,0 +1,26 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role test_build_server +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +#### INTENTIONALLY LEFT BLANK #### + +# Since this is a test role, it should not install anything extra onto the +# target machine, which would alter the system under test. Be careful when +# adding dependencies. 
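For context, a hedged sketch of applying this role to a freshly provisioned worker before promoting its AMI; the hosts group is hypothetical, and the override mirrors the TEST_EDX_PLATFORM_VERSION default shown above:

- hosts: jenkins_workers
  become: true
  roles:
    - role: test_build_server
      # Defaults above assume user 'jenkins' and repo path /home/jenkins.
      TEST_EDX_PLATFORM_VERSION: master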
diff --git a/playbooks/roles/test_build_server/tasks/main.yml b/playbooks/roles/test_build_server/tasks/main.yml new file mode 100644 index 00000000000..d2a93b8aeea --- /dev/null +++ b/playbooks/roles/test_build_server/tasks/main.yml @@ -0,0 +1,48 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role test_build_server +# +# Overview: +# +# +# Dependencies: +# +# +# Example play: +# + +- name: Create clone of edx-platform + git: + repo: "https://github.com/openedx/edx-platform.git" + dest: "{{ test_build_server_repo_path }}/edx-platform-clone" + version: "{{ TEST_EDX_PLATFORM_VERSION }}" + become_user: "{{ test_build_server_user }}" + +- name: get xargs limit + shell: "xargs --show-limits" + +- name: Copy test-development-environment.sh to somewhere the jenkins user can access it + copy: + src: test-development-environment.sh + dest: "{{ test_build_server_repo_path }}" + mode: 0755 + +- name: Validate build environment + shell: "bash test-development-environment.sh {{ item }}" + args: + chdir: "{{ test_build_server_repo_path }}/" + environment: + PYTHON_VERSION: "3.8" + become_user: "{{ test_build_server_user }}" + with_items: + - "unit" + - "js" diff --git a/playbooks/roles/testcourses/defaults/main.yml b/playbooks/roles/testcourses/defaults/main.yml new file mode 100644 index 00000000000..0dad5a43c72 --- /dev/null +++ b/playbooks/roles/testcourses/defaults/main.yml @@ -0,0 +1,13 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role testcourses +# for sandbox, TESTCOURSES_EXPORTS is coming from sandbox-internal repo. +TESTCOURSES_EXPORTS: [] diff --git a/playbooks/roles/testcourses/meta/main.yml b/playbooks/roles/testcourses/meta/main.yml new file mode 100644 index 00000000000..46cc6198bef --- /dev/null +++ b/playbooks/roles/testcourses/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - demo diff --git a/playbooks/roles/testcourses/tasks/deploy.yml b/playbooks/roles/testcourses/tasks/deploy.yml new file mode 100644 index 00000000000..76e8753025e --- /dev/null +++ b/playbooks/roles/testcourses/tasks/deploy.yml @@ -0,0 +1,16 @@ +--- +- include: import_course.yml + when: course.install == True + with_items: "{{ TESTCOURSES_EXPORTS }}" + loop_control: + loop_var: course + +- name: enroll test users in the testcourses + shell: ". 
{{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}" + args: + chdir: "{{ demo_edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + when: item[1].install == True + with_nested: + - "{{ demo_test_users }}" + - "{{ TESTCOURSES_EXPORTS }}" diff --git a/playbooks/roles/testcourses/tasks/import_course.yml b/playbooks/roles/testcourses/tasks/import_course.yml new file mode 100644 index 00000000000..3d36b8b4552 --- /dev/null +++ b/playbooks/roles/testcourses/tasks/import_course.yml @@ -0,0 +1,14 @@ +- name: check out testcourses + git: + dest: "{{ demo_app_dir }}/{{ course.course_id }}" + repo: "{{ course.github_url }}" + accept_hostkey: yes + become_user: "{{ demo_edxapp_user }}" + register: testcourse_checkout + +- name: import testcourse + shell: ". {{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings={{ demo_edxapp_settings }} import {{ demo_app_dir }}/{{ course.course_id }}" + args: + chdir: "{{ demo_edxapp_code_dir }}" + when: testcourse_checkout.changed + become_user: "{{ common_web_user }}" diff --git a/playbooks/roles/testcourses/tasks/main.yml b/playbooks/roles/testcourses/tasks/main.yml new file mode 100644 index 00000000000..b7054c67db2 --- /dev/null +++ b/playbooks/roles/testcourses/tasks/main.yml @@ -0,0 +1,33 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role testcourses +# +# Overview: +# +# Imports the testcourses course into studio +# https://github.com/edx/edx-testcourses-course +# +# Once imported this role will only re-import the course +# if the edx-testcourses-course repo has been updated +# +# Dependencies: +# - common +# - edxapp +# +# Example play: +# +# roles: +# - common +# - edxapp +# - testcourses + +- include: deploy.yml tags=deploy diff --git a/playbooks/roles/tinymce_plugins/README.rst b/playbooks/roles/tinymce_plugins/README.rst new file mode 100644 index 00000000000..ae9660704e5 --- /dev/null +++ b/playbooks/roles/tinymce_plugins/README.rst @@ -0,0 +1,47 @@ +TinyMCE (Visual Text/HTML Editor) Plugins +----------------------------------------- + +The flexibility of the TinyMCE Visual Text and HTML editor makes it possible to configure and extend the editor using different plugins. In order to make use of that modularity in Studio, you'll need to follow two different steps. + +Installing Plugins +================== + +In order to install the needed TinyMCE plugins while setting up the edX platform, you'll need to specify them in ``TINYMCE_ADDITIONAL_PLUGINS_LIST``. It is a list of objects with the following attributes: + +.. list-table:: + :header-rows: 1 + :widths: 15 10 10 65 + + * - attribute + - type + - required + - description + * - ``repo`` + - string + - yes + - The TinyMCE plugin's repository. + * - ``name`` + - string + - yes + - Specifies the name of the cloned repository for the TinyMCE plugin. + * - ``plugin_path`` + - string + - no + - Specifies the plugin's relative path in the repository. It is the directory that directly contains the ``plugin.js`` file. + Default value is ``/``, which indicates that the repository directly contains the ``plugin.js`` file. 
If the repository doesn't directly contain the ``plugin.js``, then the folder containing it should share the same name as the plugin name. + +Here's an example: + +.. code:: yaml + + TINYMCE_ADDITIONAL_PLUGINS_LIST: + - repo: https://github.com/name/demo-plugin + name: demo-plugin + plugin_path: "/demo-plugin" + +Enabling Plugins +================ + +There's a decent `guide on enabling the plugins through the edX platform`_, specifically using the ``TINYMCE_ADDITIONAL_PLUGINS`` extra JavaScript configuration. + +.. _guide on enabling the plugins through the edX platform: https://github.com/openedx/edx-platform/blob/master/docs/guides/extensions/tinymce_plugins.rst diff --git a/playbooks/roles/tinymce_plugins/defaults/main.yml b/playbooks/roles/tinymce_plugins/defaults/main.yml new file mode 100644 index 00000000000..5450e04efc0 --- /dev/null +++ b/playbooks/roles/tinymce_plugins/defaults/main.yml @@ -0,0 +1,10 @@ +--- + +tinymce_plugin_temp_dir: "{{ edxapp_code_dir }}/.temp_tinymce_plugin" + +tinymce_dir: "{{ edxapp_code_dir }}/common/static/js/vendor/tinymce" +tinymce_plugins_dir: "{{ tinymce_dir }}/js/tinymce/plugins" + +edx_jake_package: "{{ edxapp_code_dir }}/vendor_extra/tinymce/JakePackage.zip" + +TINYMCE_ADDITIONAL_PLUGINS_LIST: [] diff --git a/playbooks/roles/tinymce_plugins/tasks/import_tinymce_plugin.yml b/playbooks/roles/tinymce_plugins/tasks/import_tinymce_plugin.yml new file mode 100644 index 00000000000..d562e5389d4 --- /dev/null +++ b/playbooks/roles/tinymce_plugins/tasks/import_tinymce_plugin.yml @@ -0,0 +1,18 @@ +--- + +- name: Add additional tinymce plugins + block: + - name: Clone plugin + git: + repo: "{{ plugin.repo }}" + dest: "{{ tinymce_plugin_temp_dir }}/{{ plugin.name }}" + version: "{{ plugin.version | default('master') }}" + accept_hostkey: true + key_file: "{% if EDXAPP_USE_GIT_IDENTITY %}{{ edxapp_git_identity }}{% endif %}" + - name: Move plugin to tinymce plugins directory + command: "cp -r {{ tinymce_plugin_temp_dir }}/{{ plugin.name }}{{ plugin.plugin_path | default('') }} {{ tinymce_plugins_dir }}" + - name: Clean temporary tinymce plugin repository + file: + state: absent + path: "{{ tinymce_plugin_temp_dir }}/{{ plugin.name }}" + become_user: "{{ edxapp_user }}" diff --git a/playbooks/roles/tinymce_plugins/tasks/main.yml b/playbooks/roles/tinymce_plugins/tasks/main.yml new file mode 100644 index 00000000000..e9382d36a3f --- /dev/null +++ b/playbooks/roles/tinymce_plugins/tasks/main.yml @@ -0,0 +1,14 @@ +--- + +- name: Import additional tinymce plugins + include_tasks: import_tinymce_plugin.yml + loop: "{{ TINYMCE_ADDITIONAL_PLUGINS_LIST }}" + loop_control: + loop_var: plugin + when: + - TINYMCE_ADDITIONAL_PLUGINS_LIST|length > 0 + +- name: Rebuild tinymce files + include_tasks: rebuild_tinymce_files.yml + when: + - TINYMCE_ADDITIONAL_PLUGINS_LIST|length > 0 diff --git a/playbooks/roles/tinymce_plugins/tasks/rebuild_tinymce_files.yml b/playbooks/roles/tinymce_plugins/tasks/rebuild_tinymce_files.yml new file mode 100644 index 00000000000..cebed590242 --- /dev/null +++ b/playbooks/roles/tinymce_plugins/tasks/rebuild_tinymce_files.yml @@ -0,0 +1,22 @@ +--- + +- name: Minify tinymce plugin files + block: + - name: Unarchive JakePackage.zip + unarchive: + src: "{{ edx_jake_package }}" + dest: "{{ tinymce_dir }}" + remote_src: True + - name: Clean install npm dependencies from archive + shell: "npm ci" + args: + chdir: "{{ tinymce_dir }}" + - name: Clean existing tinymce js files with jake + shell: "npx jake clean-js" + args: + chdir: "{{ tinymce_dir }}" + - 
name: Build tinymce with jake + shell: "npx jake minify bundle[themes:*,plugins:*]" + args: + chdir: "{{ tinymce_dir }}" + become_user: "{{ edxapp_user }}" diff --git a/playbooks/roles/tools_jenkins/defaults/main.yml b/playbooks/roles/tools_jenkins/defaults/main.yml new file mode 100644 index 00000000000..99d9df34bcf --- /dev/null +++ b/playbooks/roles/tools_jenkins/defaults/main.yml @@ -0,0 +1,82 @@ +--- + +JENKINS_TOOLS_VERSION: "1.651.3" + +jenkins_tools_plugins: + - { name: "cloudbees-folder", version: "5.12" } + - { name: "structs", version: "1.2" } + - { name: "job-dsl", version: "1.52" } + - { name: "greenballs", version: "1.15" } + - { name: "rebuild", version: "1.25" } + - { name: "build-user-vars-plugin", version: "1.5" } + - { name: "mailer", version: "1.17" } + - { name: "credentials", version: "2.1.4" } + - { name: "ssh-credentials", version: "1.12" } + - { name: "ssh-agent", version: "1.13" } + - { name: "bouncycastle-api", version: "1.648.3" } + - { name: "token-macro", version: "2.1" } + - { name: "parameterized-trigger", version: "2.32" } + - { name: "conditional-buildstep", version: "1.3.5" } + - { name: "run-condition", version: "0.10" } + - { name: "multiple-scms", version: "0.6" } + - { name: "throttle-concurrents", version: "1.9.0" } + - { name: "mask-passwords", version: "2.8" } + - { name: "dashboard-view", version: "2.9.10" } + - { name: "tmpcleaner", version: "1.2" } + - { name: "jobConfigHistory", version: "2.15" } + - { name: "build-timeout", version: "1.17.1" } + - { name: "timestamper", version: "1.8.4" } + - { name: "postbuild-task", version: "1.8" } + - { name: "notification", version: "1.10" } + - { name: "shiningpanda", version: "0.23" } + - { name: "build-name-setter", version: "1.6.5" } + - { name: "simple-parameterized-builds-report", version: "1.5" } + - { name: "junit", version: "1.15" } + - { name: "scm-api", version: "1.2" } + - { name: "workflow-scm-step", version: "1.14.2" } + - { name: "workflow-step-api", version: "1.14.2" } + - { name: "ghprb", version: "1.35.0" } + - { name: "github-api", version: "1.90" } + - { name: "git-client", version: "1.21.0"} + - { name: "git", version: "2.5.3"} + - { name: "github", version: "1.21.1" } + - { name: "plain-credentials", version: "1.2" } + - { name: "github-oauth", version: "0.24" } + - { name: "gradle", version: "1.25" } + - { name: "credentials-binding", version: "1.10" } + - { name: "envinject", version: "1.92.1" } + - { name: "email-ext", version: "2.57.2" } + - { name: "text-finder", version: "1.10"} + +# matrix-auth is now pinned to avoid Jenkins overriding +# 1.3 and later requires icon-shim + - { name: "matrix-auth", version: "1.4" } + - { name: "icon-shim", version: "2.0.3" } + +# ec2 + dependencies, used by the android build workers + any additional workers we build + - { name: "ec2", version: "1.36" } + - { name: "node-iterator-api", version: "1.5" } + - { name: "aws-java-sdk", version: "1.11.37" } + - { name: "jackson2-api", version: "2.5.4" } + - { name: "aws-credentials", version: "1.11" } + +jenkins_tools_bundled_plugins: + - 'matrix-auth' + - "credentials" + - "git" + - "pam-auth" + - "ssh-credentials" + - "ssh-slaves" + +jenkins_tools_helm_url: "https://get.helm.sh/helm-v3.5.2-linux-amd64.tar.gz" +jenkins_venv_src_dir: "../util/jenkins" + +jenkins_tools_debian_pkgs: + - nginx + - git + - maven + - daemon + - python-pycurl + - psmisc + - mysql-client=5.7.* + - ruby-sass diff --git a/playbooks/roles/tools_jenkins/meta/main.yml b/playbooks/roles/tools_jenkins/meta/main.yml new file 
mode 100644
index 00000000000..b46e2c6acb0
--- /dev/null
+++ b/playbooks/roles/tools_jenkins/meta/main.yml
@@ -0,0 +1,21 @@
+---
+dependencies:
+  - common
+  - edxapp_common
+  - browsers
+  - role: jenkins_master
+    jenkins_plugins: "{{ jenkins_tools_plugins }}"
+    JENKINS_VERSION: "{{ JENKINS_TOOLS_VERSION }}"
+    jenkins_deb_url: "https://pkg.jenkins.io/debian-stable/binary/jenkins_%7B%7B%20JENKINS_VERSION%20%7D%7D_all.deb"
+    jenkins_custom_plugins: []
+    jenkins_bundled_plugins: "{{ jenkins_tools_bundled_plugins }}"
+    jenkins_debian_pkgs: "{{ jenkins_tools_debian_pkgs }}"
+    jenkins_venv_src_dir: "{{ jenkins_venv_src_dir }}"
+    jenkins_install_mysql_5_7: true
+
+  # Needed to be able to build docker images. Used by Docker Image Builder Jobs.
+  - role: docker-tools
+    docker_users:
+      - '{{ jenkins_user }}'
+
+  - role: mongo_client
diff --git a/playbooks/roles/tools_jenkins/tasks/main.yml b/playbooks/roles/tools_jenkins/tasks/main.yml
new file mode 100644
index 00000000000..48aba09c1c2
--- /dev/null
+++ b/playbooks/roles/tools_jenkins/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+# The deadsnakes PPA is required to install the Python versions below that
+# are not packaged with the distribution release.
+- name: add deadsnakes repository
+  apt_repository:
+    repo: "ppa:deadsnakes/ppa"
+  when: ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal'
+  tags:
+    - install
+    - install:system-requirements
+
+- name: install python3.5, 3.6 and 3.8
+  apt:
+    name: "{{ item }}"
+  with_items:
+    - python3.5
+    - python3.5-dev
+    - python3.6
+    - python3.6-dev
+    - python3.8
+    - python3.8-dev
+  when: ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal'
+  tags:
+    - install
+    - install:system-requirements
+
+- name: download helm package
+  get_url:
+    url: "{{ jenkins_tools_helm_url }}"
+    dest: "/tmp/helm.tar.gz"
+    mode: 0644
+
+- name: extract helm archive
+  unarchive:
+    src: "/tmp/helm.tar.gz"
+    dest: "/tmp/"
+    copy: false
+
+- name: copy helm binary to executable path
+  copy:
+    src: /tmp/linux-amd64/helm
+    dest: /usr/local/bin/helm
+    remote_src: yes
+    mode: 0755
diff --git a/playbooks/roles/user/defaults/main.yml b/playbooks/roles/user/defaults/main.yml
new file mode 100644
index 00000000000..0378c70ffda
--- /dev/null
+++ b/playbooks/roles/user/defaults/main.yml
@@ -0,0 +1,46 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Vars for role user
+#
+
+
+#
+# vars are namespaced with the module name.
+#
+user_role_name: user
+
+# Override this var to add a prefix to the prompt.
+# You also need to set comment_update_bashrc to
+# update the system bashrc default.
+USER_CMD_PROMPT: ""
+
+# These are the default links to create in the
+# restricted user's ~/bin directory.
+# Defaults to sudo; more can be added by overriding
+# this var.
+user_rbash_links:
+  - /usr/bin/sudo
+
+# Parameter for this role; it must be set when the role is called and should
+# NOT be set in extra vars, since extra vars would take precedence over the
+# parameter.
+user_info: []
+
+# Boolean variable that will cause the user module to stop Ansible with a
+# failure if a user that has been configured to have their keys pulled from
+# GitHub does not have any ssh keys configured on GitHub.
This is set to
+# false by default as we normally do not wish to interrupt Ansible, but
+# we wish to selectively enable it for a particular Jenkins job that adds
+# users. In the default false state this playbook will only print a warning
+# message, but not halt.
+#
+USER_FAIL_MISSING_KEYS: false
diff --git a/playbooks/roles/user/meta/main.yml b/playbooks/roles/user/meta/main.yml
new file mode 100644
index 00000000000..8bebd4b88c7
--- /dev/null
+++ b/playbooks/roles/user/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - common_vars
\ No newline at end of file
diff --git a/playbooks/roles/user/tasks/main.yml b/playbooks/roles/user/tasks/main.yml
new file mode 100644
index 00000000000..6c316d67ff1
--- /dev/null
+++ b/playbooks/roles/user/tasks/main.yml
@@ -0,0 +1,272 @@
+---
+
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+# Tasks for role user
+#
+# Overview:
+#
+# This role is included as a dependency by other roles or as a standalone
+# parameterized role to create users.
+#
+# There are generally three classes of users:
+# (1) normal login users without any special permissions
+# (2) admin users with full sudo permissions
+# (3) restricted users that use rbash and are locked down to specific sudo commands
+#
+# The parameter "type" sets the user in one of these three categories:
+# (1) type not set
+# (2) type=admin
+# (3) type=restricted
+#
+# Dependencies:
+#
+# This role has no dependencies but requires parameters
+#
+# Example:
+#
+# # Create a few users, one restricted
+# # one admin with a github key and one with
+# # a regular key.
+# #
+# # All user types can use a key from github
+# # and also have additional authorized keys defined
+# #
+#
+# - role: user
+#   user_info:
+#     # This restricted user is defined in meta/
+#     # for edxapp, it creates a user that can only
+#     # run manage.py commands
+#     - name: automator
+#       type: restricted
+#       state: present
+#       # The sudoers file is optional.
+#       sudoers_template: '99-edxapp-manage-cmds.j2'
+#       authorized_keys:
+#         - ssh-rsa abcdef...
+#         - ssh-rsa ghiklm...
+#
+#     # More users passed to the role, this one is a user
+#     # with full sudo, key fetched from github
+#     - name: frank
+#       github: true
+#       type: admin
+#       state: present
+#
+#     # This user is a normal login user without sudo, with
+#     # a couple keys passed in as parameters
+#     - name: sally
+#       authorized_keys:
+#         - ssh-rsa abcdef...
+#         - ssh-rsa ghiklm...
+#
+# By default for restricted users we only allow sudo; if you
+# want to provide more binaries, add them to user_rbash_links,
+# which can be passed in as a parameter to the role.
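+#
+# For example, a hypothetical override (these paths are illustrative, not
+# defaults) that also exposes git and psql to restricted users; each entry
+# is symlinked into the restricted user's ~/bin by the tasks below:
+#
+# user_rbash_links:
+#   - /usr/bin/sudo
+#   - /usr/bin/git
+#   - /usr/bin/psql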
+# +- debug: + var: user_info + +- name: Create the edxadmin group + group: + name: edxadmin + state: present + +# some AMIs (such as EMR master nodes) don't read the config files out of /etc/sudoers.d by default +- name: Ensure sudoers.d is read + lineinfile: + dest: /etc/sudoers + state: present + regexp: '^#includedir /etc/sudoers.d' + line: '#includedir /etc/sudoers.d' + validate: 'visudo -cf %s' + +# give full sudo admin access to the edxadmin group +- name: Grant full sudo access to the edxadmin group + copy: + content: "%edxadmin ALL=(ALL) NOPASSWD:ALL" + dest: /etc/sudoers.d/edxadmin + owner: root + group: root + mode: 0440 + validate: 'visudo -cf %s' + +- name: Create the users + user: + name: "{{ item.name }}" + shell: /bin/bash + remove: yes + state: "{{ item.state | default('present') }}" + with_items: "{{ user_info }}" + +- name: Assign admin role to admin users + user: + name: "{{ item.name }}" + groups: edxadmin + when: item.type is defined and item.type == 'admin' and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Check the ssh key(s) for user(s) over github + uri: + url: "https://github.com/%7B%7B%20item.name%20%7D%7D.keys" + return_content: true + # We don't care if absent users lack ssh keys + when: item.get('state', 'present') == 'present' and item.github is defined + with_items: "{{ user_info }}" + register: github_users_return + until: github_users_return is succeeded + retries: 5 + +- name: Print warning if github user(s) missing ssh key + debug: + msg: "User {{ item.item.name }} doesn't have an SSH key associated with their github account" + with_items: "{{ github_users_return.results | default([]) }}" + # We skip users in the previous task, and they end up with no content_length + when: ('content' in item and item.content == "") + +- name: Halt if USER_FAIL_MISSING_KEYS is true and github user(s) missing ssh key + fail: + msg: "User {{ item.item.name }} doesn't have an SSH key associated with their github account" + with_items: "{{ github_users_return.results | default([]) }}" + # We skip users in the previous task, and they end up with no content_length + when: (USER_FAIL_MISSING_KEYS and 'content' in item and item.content == "") + +- name: Get github key(s) and update the authorized_keys file + authorized_key: + user: "{{ item.name }}" + exclusive: yes + key: "https://github.com/%7B%7B%20item.name%20%7D%7D.keys" + when: item.github is defined and item.get('state', 'present') == 'present' + register: task_result + until: task_result is succeeded + retries: 5 + with_items: "{{ user_info }}" + +- name: Create ~/.ssh directory for non github users + file: + path: "/home/{{ item.name }}/.ssh" + state: directory + mode: "0755" + owner: "{{ item.name }}" + group: "{{ item.name }}" + when: item.authorized_keys is defined and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Build authorized_keys file for non github users + template: + src: "templates/authorized_keys.j2" + dest: "/home/{{ item.name }}/.ssh/authorized_keys" + mode: "0600" + owner: "{{ item.name }}" + group: "{{ item.name }}" + with_items: "{{ user_info }}" + when: item.authorized_keys is defined and item.get('state', 'present') == 'present' + +- name: Create bashrc file for normal users + template: + src: default.bashrc.j2 + dest: "/home/{{ item.name }}/.bashrc" + mode: "0640" + owner: "{{ item.name }}" + when: not (item.type is defined and item.type == 'restricted') and item.get('state', 'present') == 'present' + 
with_items: "{{ user_info }}" + +- name: Create .profile for all users + template: + src: default.profile.j2 + dest: "/home/{{ item.name }}/.profile" + mode: "0640" + owner: "{{ item.name }}" + when: item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +######################################################## +# All tasks below this line are for restricted users + +- name: Modify shell for restricted users + user: + name: "{{ item.name }}" + shell: /bin/rbash + when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Create bashrc file for restricted users + template: + src: restricted.bashrc.j2 + dest: "/home/{{ item.name }}/.bashrc" + mode: "0640" + owner: "{{ item.name }}" + when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Create sudoers file from template + template: + dest: /etc/sudoers.d/99-restricted + src: restricted.sudoers.conf.j2 + owner: root + group: root + mode: 0440 + validate: 'visudo -cf %s' + + # Prevent restricted user from updating their PATH and + # environment by ensuring root ownership + +- name: Change home directory ownership to root for restricted users + file: + path: "/home/{{ item.name }}" + owner: root + group: "{{ item.name }}" + recurse: yes + when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Create ~/bin directory + file: + path: "/home/{{ item.name }}/bin" + state: directory + mode: "0750" + owner: root + group: "{{ item.name }}" + when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Create allowed command links + file: + src: "{{ item[1] }}" + dest: "/home/{{ item[0].name }}/bin/{{ item[1]|basename }}" + state: link + when: item[0].type is defined and item[0].type == 'restricted' and item[0].get('state', 'present') == 'present' + with_nested: + - "{{ user_info }}" + - "{{ user_rbash_links }}" + +- name: remove ssh AllowUsers directive + lineinfile: + dest: /etc/ssh/sshd_config + regexp: "^AllowUsers" + state: absent + register: users_ssh_access + +- name: Restart ssh (ubuntu/debian) + service: + name: ssh + state: restarted + become: True + when: > + users_ssh_access.changed and + ansible_distribution in common_debian_variants + +- name: Restart ssh (redhat) + service: + name: sshd + state: restarted + become: True + when: > + users_ssh_access.changed and + ansible_distribution in common_redhat_variants diff --git a/playbooks/roles/user/templates/authorized_keys.j2 b/playbooks/roles/user/templates/authorized_keys.j2 new file mode 100644 index 00000000000..67dd4df13c1 --- /dev/null +++ b/playbooks/roles/user/templates/authorized_keys.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% for line in item.authorized_keys -%} +{{ line }} +{% endfor %} diff --git a/playbooks/roles/gh_users/templates/default.bashrc.j2 b/playbooks/roles/user/templates/default.bashrc.j2 similarity index 84% rename from playbooks/roles/gh_users/templates/default.bashrc.j2 rename to playbooks/roles/user/templates/default.bashrc.j2 index e888c583720..b320a6612df 100644 --- a/playbooks/roles/gh_users/templates/default.bashrc.j2 +++ b/playbooks/roles/user/templates/default.bashrc.j2 @@ -53,10 +53,12 @@ if [ -n "$force_color_prompt" ]; then fi fi +command -v ec2metadata >/dev/null 2>&1 && { 
INSTANCEID=$(ec2metadata --instance-id); }
+
 if [ "$color_prompt" = yes ]; then
-    PS1='{{ GH_USERS_PROMPT }}${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+    PS1='{{ USER_CMD_PROMPT }}${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h $INSTANCEID\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
 else
-    PS1='{{ GH_USERS_PROMPT}}${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
+    PS1='{{ USER_CMD_PROMPT}}${debian_chroot:+($debian_chroot)}\u@\h $INSTANCEID:\w\$ '
 fi
 unset color_prompt force_color_prompt
@@ -73,9 +75,6 @@ esac
 if [ -x /usr/bin/dircolors ]; then
     test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
     alias ls='ls --color=auto'
-    #alias dir='dir --color=auto'
-    #alias vdir='vdir --color=auto'
-
     alias grep='grep --color=auto'
     alias fgrep='fgrep --color=auto'
     alias egrep='egrep --color=auto'
@@ -85,6 +84,7 @@ fi
 alias ll='ls -alF'
 alias la='ls -A'
 alias l='ls -CF'
+alias h='ls ~/.bash_histories/*/* | sort | xargs grep -i '

 # better bash history

@@ -96,13 +96,15 @@ if [ "$BASH_VERSINFO" -ge 2 ]; then
     shopt -s checkhash
     shopt -s no_empty_cmd_completion
     shopt -s execfail
+    export HISTCONTROL=ignorespace
+    unset HISTFILESIZE
+    unset HISTSIZE
+    export HISTTIMEFORMAT='%Y-%m-%d %H:%M:%S - '
+
     shopt -s histappend
-    HISTFILESIZE=10000
-    HISTDIR="$HOME/.bash_histories/`uname -n`"
-    [[ ! -d $HISTDIR ]] && mkdir -p "$HISTDIR"
-    HISTFILE="$HISTDIR/`date +%Y_%m`"
-    # Suppress duplicates, bare "ls" and bg,fg and exit
-    HISTIGNORE="&:ls:[bf]g:exit"
+    # save multi-line commands as a single command with semi-colons
+
+    shopt -s cmdhist
 fi
diff --git a/playbooks/roles/user/templates/default.profile.j2 b/playbooks/roles/user/templates/default.profile.j2
new file mode 100644
index 00000000000..1865ceea890
--- /dev/null
+++ b/playbooks/roles/user/templates/default.profile.j2
@@ -0,0 +1,8 @@
+umask 022
+# if running bash
+if [ -n "$BASH_VERSION" ]; then
+    # include .bashrc if it exists
+    if [ -f "$HOME/.bashrc" ]; then
+        . "$HOME/.bashrc"
+    fi
+fi
diff --git a/playbooks/roles/automated/files/home/automator/.bashrc b/playbooks/roles/user/templates/restricted.bashrc.j2
similarity index 100%
rename from playbooks/roles/automated/files/home/automator/.bashrc
rename to playbooks/roles/user/templates/restricted.bashrc.j2
diff --git a/playbooks/roles/user/templates/restricted.sudoers.conf.j2 b/playbooks/roles/user/templates/restricted.sudoers.conf.j2
new file mode 100644
index 00000000000..d4ddcc9049d
--- /dev/null
+++ b/playbooks/roles/user/templates/restricted.sudoers.conf.j2
@@ -0,0 +1,7 @@
+{% for user in user_info -%}
+{% if 'sudo_cmds' in user -%}
+{% for cmd in user['sudo_cmds'] -%}
+{{ user['name'] }} {{ cmd }}
+{% endfor %}
+{% endif %}
+{% endfor %}
diff --git a/playbooks/roles/user_retirement_pipeline/README.rst b/playbooks/roles/user_retirement_pipeline/README.rst
new file mode 100644
index 00000000000..a441da3023c
--- /dev/null
+++ b/playbooks/roles/user_retirement_pipeline/README.rst
@@ -0,0 +1,174 @@
+User Retirement Pipeline
+########################
+
+In the Open edX platform, the user experience is enabled by several
+services, such as LMS, Studio, ecommerce, credentials, discovery, and
+more. Personally Identifiable Information (PII) about a user can
+exist in many of these services. As a consequence, to remove a user's
+PII, you must be able to request each service containing PII to remove,
+delete, or unlink the data for that user in that service.
+
+In the user retirement feature, a centralized process (the driver
+scripts) orchestrates all of these requests. For information about how
+to configure the driver scripts, see Setting Up the User Retirement
+Driver Scripts.
+
+`More info here. `__
+
+Configuration & Deployment
+**************************
+
+The user retirement pipeline can be deployed together with the edxapp
+role, on small deployments that use a single AppServer to host all
+services, or standalone, which is the default for bigger installs.
+
+You can also use ansible-playbook to test this role independently. It
+requires you to pass more variables manually because they're not
+available except when running inside the "edxapp" role.
+
+When running this role, you'll need to set:
+
+- ``COMMON_RETIREMENT_SERVICE_SETUP``: Set to true to configure the
+  retirement service pipeline
+- ``RETIREMENT_SERVICE_COOL_OFF_DAYS``: Number of days that an account
+  stays marked for deletion before being picked up by the retirement
+  service
+- ``RETIREMENT_SERVICE_ENABLE_CRON_JOB``: Set to true if you want to
+  set up a daily cron job for the retirement service
+- ``EDXAPP_RETIREMENT_SERVICE_USER_EMAIL``: Email of the retirement
+  worker user set up on LMS
+- ``EDXAPP_RETIREMENT_SERVICE_USER_NAME``: Username of the retirement
+  worker user set up on LMS
+- ``RETIREMENT_SERVICE_EDX_OAUTH2_KEY``: OAuth2 client id from LMS
+- ``RETIREMENT_SERVICE_EDX_OAUTH2_SECRET``: OAuth2 client secret from
+  LMS
+- ``RETIREMENT_LMS_BASE_URL``: Full LMS URL
+  (e.g. ``https://lms.domain.com``)
+- ``RETIREMENT_ECOMMERCE_BASE_BASE_URL``: Full Ecommerce URL
+  (e.g. ``https://ecommerce.domain.com``)
+- ``RETIREMENT_CREDENTIALS_BASE_URL``: Full Credentials URL
+  (e.g. ``https://credentials.domain.com``)
+
+To use a custom retirement pipeline, you'll need to configure the git
+remotes and also the retirement pipeline "steps".
+
+To set up the git repository, you can follow this template:
+
+::
+
+    RETIREMENT_SERVICE_GIT_IDENTITY: !!null
+    RETIREMENT_SERVICE_GIT_REPOS:
+      - PROTOCOL: "https"
+        DOMAIN: "github.com"
+        PATH: "edx"
+        REPO: "tubular.git"
+        VERSION: "master"
+        DESTINATION: "{{ retirement_service_app_dir }}"
+        SSH_KEY: "{{ RETIREMENT_SERVICE_GIT_IDENTITY }}"
+
+And to set up the retirement pipeline, you'll need to set
+``RETIREMENT_SERVICE_PIPELINE_CONFIGURATION`` according to the following
+example:
+
+::
+
+    RETIREMENT_SERVICE_PIPELINE_CONFIGURATION:
+      - NAME: "RETIRING_ENROLLMENTS"
+        NAME_COMPLETE: "ENROLLMENTS_COMPLETE"
+        SERVICE: "LMS"
+        FUNCTION: "retirement_unenroll"
+      - NAME: "RETIRING_LMS_MISC"
+        NAME_COMPLETE: "LMS_MISC_COMPLETE"
+        SERVICE: "LMS"
+        FUNCTION: "retirement_lms_retire_misc"
+      - NAME: "RETIRING_LMS"
+        NAME_COMPLETE: "LMS_COMPLETE"
+        SERVICE: "LMS"
+        FUNCTION: "retirement_lms_retire"
+
+You can also test this role on your Docker devstack, like this:
+
+1. Clone this branch to the ``./src`` folder of your ``master`` devstack.
+2. From the ``devstack`` folder, run ``make lms-shell`` and edit
+   ``lms.env.json`` to set these variables:
+
+::
+
+    ....
+    "RETIRED_USER_SALTS": ["oWiJVxbtp86kEV4jAHcZXSoSucSSF6GE6qjFA8rZp8yBPMSwKM",],
+    "EDXAPP_RETIREMENT_SERVICE_USER_NAME": "retirement_service_worker",
+    "RETIREMENT_STATES": [
+        "PENDING",
+        "RETIRING_ENROLLMENTS",
+        "ENROLLMENTS_COMPLETE",
+        "RETIRING_LMS_MISC",
+        "LMS_MISC_COMPLETE",
+        "RETIRING_LMS",
+        "LMS_COMPLETE",
+        "RETIRING_CREDENTIALS",
+        "CREDENTIALS_COMPLETE",
+        "ERRORED",
+        "ABORTED",
+        "COMPLETE"
+    ],
+    ...
+    "FEATURES": {
+        ...
+        "ENABLE_ACCOUNT_DELETION": true
+    }
+
3. Populate the retirement states:
+
+::
+
+    ./manage.py lms --settings=devstack_docker populate_retirement_states
+
+4. Create the user and OAuth2 Credentials for the retirement worker, and
+   take note of the generated client id and secret; you'll need them to
+   set up the retirement scripts:
+
+::
+
+    app_name=retirement
+    user_name=retirement_service_worker
+    ./manage.py lms --settings= manage_user $user_name $user_name@example.com --staff --superuser
+    ./manage.py lms --settings= create_dot_application $app_name $user_name
+
+5. Now, use the Ansible role to set up the User Retirement Pipeline:
+
+::
+
+    export PYTHONUNBUFFERED=1
+    source /edx/app/edx_ansible/venvs/edx_ansible/bin/activate
+    cd /edx/src/configuration/playbooks
+    ansible-playbook \
+        -i localhost, \
+        -c local run_role.yml \
+        -e role=user_retirement_pipeline \
+        -e CONFIGURATION_VERSION=master \
+        -e EDX_PLATFORM_VERSION=master \
+        -e edxapp_user=root \
+        -e COMMON_RETIREMENT_SERVICE_SETUP=true \
+        -e RETIREMENT_SERVICE_COOL_OFF_DAYS=0 \
+        -e RETIREMENT_SERVICE_ENABLE_CRON_JOB=true \
+        -e EDXAPP_RETIREMENT_SERVICE_USER_EMAIL=retirement_service_worker@example.com \
+        -e EDXAPP_RETIREMENT_SERVICE_USER_NAME=retirement_service_worker \
+        -e RETIREMENT_SERVICE_EDX_OAUTH2_KEY= \
+        -e RETIREMENT_SERVICE_EDX_OAUTH2_SECRET=
+
+6. Check that the retirement pipeline is correctly set up at
+   ``/edx/app/retirement_service``.
+7. Create some users, go to their account page, and mark them for
+   deletion. |mar|
+8. Check
+   `here `__
+   if the retirement requests have been registered.
+9. Run the retirement script as root:
+
+::
+
+    /edx/app/retirement_service/retire_users.sh
+
+.. |mar| image:: https://user-images.githubusercontent.com/27893385/53957569-6b9da180-40bd-11e9-9139-10c62e499ec4.png
+
diff --git a/playbooks/roles/user_retirement_pipeline/defaults/main.yml b/playbooks/roles/user_retirement_pipeline/defaults/main.yml
new file mode 100644
index 00000000000..2ab1beb533e
--- /dev/null
+++ b/playbooks/roles/user_retirement_pipeline/defaults/main.yml
@@ -0,0 +1,80 @@
+---
+#
+# Open edX Retirement Pipeline Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+# Deploy the User Retirement Pipeline
+#
+# See documentation in README.rst
+
+#
+# This file contains the variables you'll need to pass to the role, and some
+# example values.
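+#
+# For instance, a hedged example override (illustrative values, not
+# defaults) that enables the cron job so the pipeline runs daily at 02:30;
+# the hour/minute values feed the cron task in tasks/main.yml:
+#
+# RETIREMENT_SERVICE_ENABLE_CRON_JOB: true
+# RETIREMENT_SERVICE_CRON_JOB_HOURS: 2
+# RETIREMENT_SERVICE_CRON_JOB_MINUTES: 30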
+ +retirement_service_name: "retirement-service" +retirement_service_user: "edx_retirement_worker" +retirement_service_home: "{{ COMMON_APP_DIR }}/retirement_service" +retirement_service_app_dir: "{{ retirement_service_home }}/tubular" +retirement_service_data_dir: "{{ COMMON_DATA_DIR }}/retirement_service" +retirement_service_log_dir: "{{ COMMON_LOG_DIR }}/retirement_service" +retirement_service_venv_dir: "{{ retirement_service_home }}/venv" +retirement_service_user_shell: "/bin/bash" +retirement_service_script_path: "{{ retirement_service_app_dir }}/scripts" + +retirement_service_pip_version: "{{ COMMON_PIP_VERSION }}" + +retirement_service_environment: + PATH: '{{ retirement_service_venv_dir }}/bin:{{ ansible_env.PATH }}' + +RETIREMENT_SERVICE_VERSION: "master" + +# Set up git repos +RETIREMENT_SERVICE_GIT_IDENTITY: !!null +RETIREMENT_SERVICE_GIT_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: "tubular.git" + VERSION: "{{ RETIREMENT_SERVICE_VERSION }}" + DESTINATION: "{{ retirement_service_app_dir }}" + SSH_KEY: "{{ RETIREMENT_SERVICE_GIT_IDENTITY }}" + +# Enable the setup and configuration of the retirement service +# The setup is handled by COMMON_RETIREMENT_SERVICE_SETUP on common vars +RETIREMENT_SERVICE_COOL_OFF_DAYS: 5 + +# Enable the configuration of a cron job to run this periodically +RETIREMENT_SERVICE_ENABLE_CRON_JOB: false +RETIREMENT_SERVICE_CRON_JOB_HOURS: 0 +RETIREMENT_SERVICE_CRON_JOB_MINUTES: 0 + +# Configuration needed for LMS and OAuth +# The retirement LMS username should be set on edxapp role through +# EDXAPP_RETIREMENT_SERVICE_USER_EMAIL and EDXAPP_RETIREMENT_SERVICE_USER_NAME +RETIREMENT_SERVICE_EDX_OAUTH2_KEY: "PLEASE-SET-THIS-RETIREMENT-CLIENT-ID" +RETIREMENT_SERVICE_EDX_OAUTH2_SECRET: "PLEASE-SET-THIS-RETIREMENT-CLIENT-SECRET" + +# LMS, Ecommerce and Credentials base URL +RETIREMENT_LMS_BASE_URL: 'http://edx.devstack.lms:18000/' +RETIREMENT_ECOMMERCE_BASE_BASE_URL: 'http://edx.devstack.ecommerce:18000/' +RETIREMENT_CREDENTIALS_BASE_URL: 'http://edx.devstack.credentials:18000/' + +# Retirement Pipeline configuration +RETIREMENT_SERVICE_PIPELINE_CONFIGURATION: + - NAME: "RETIRING_ENROLLMENTS" + NAME_COMPLETE: "ENROLLMENTS_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_unenroll" + - NAME: "RETIRING_LMS_MISC" + NAME_COMPLETE: "LMS_MISC_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_lms_retire_misc" + - NAME: "RETIRING_LMS" + NAME_COMPLETE: "LMS_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_lms_retire" diff --git a/playbooks/roles/user_retirement_pipeline/meta/main.yml b/playbooks/roles/user_retirement_pipeline/meta/main.yml new file mode 100644 index 00000000000..7529cd514b8 --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/meta/main.yml @@ -0,0 +1,11 @@ +dependencies: + - common + - role: add_user + user_name: "{{ retirement_service_user }}" + user_home: "{{ retirement_service_home }}" + group_name: "{{ common_web_group }}" + - role: git_clone + repo_owner: "{{ retirement_service_user }}" + repo_group: "{{ retirement_service_user }}" + GIT_REPOS: "{{ RETIREMENT_SERVICE_GIT_REPOS }}" + git_home: "{{ retirement_service_home }}" diff --git a/playbooks/roles/user_retirement_pipeline/tasks/main.yml b/playbooks/roles/user_retirement_pipeline/tasks/main.yml new file mode 100644 index 00000000000..c532c2f3d0c --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/tasks/main.yml @@ -0,0 +1,79 @@ +- name: Create data and log directories + 
file: + path: "{{ item }}" + state: directory + owner: "{{ retirement_service_user }}" + group: "{{ common_web_group }}" + mode: 0755 + with_items: + - "{{ retirement_service_data_dir }}/learners_to_retire" + - "{{ retirement_service_log_dir }}" + tags: + - install + - install:configuration + +- name: Install python3 + apt: + name: "{{ item }}" + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:requirements + +- name: Build virtualenv with python3 + command: "virtualenv --python=python3 {{ retirement_service_venv_dir }}" + args: + creates: "{{ retirement_service_venv_dir }}/bin/pip" + become_user: "{{ retirement_service_user }}" + tags: + - install + - install:requirements + +- name: Pin pip to a specific version. + command: "{{ retirement_service_venv_dir }}/bin/pip install pip=={{ retirement_service_pip_version }}" + become_user: "{{ retirement_service_user }}" + tags: + - install + - install:requirements + +- name: Install python requirements + command: pip install -r requirements.txt + args: + chdir: "{{ retirement_service_app_dir }}" + become_user: "{{ retirement_service_user }}" + environment: "{{ retirement_service_environment }}" + tags: + - install + - install:requirements + +- name: Generate configuration file for retirement service + template: + src: "config.yml.j2" + dest: "{{ retirement_service_home }}/config.yml" + owner: "{{ retirement_service_user }}" + group: "{{ common_web_group }}" + tags: + - install + - install:configuration + +- name: Set up user retirement script + template: + src: "retire_users.sh.j2" + dest: "{{ retirement_service_home }}/retire_users.sh" + mode: 0750 + owner: "{{ retirement_service_user }}" + group: "{{ common_web_group }}" + tags: + - install + - install:configuration + +- name: Install cron job for automatically running User Retirement + cron: + name: "Run user retirement pipeline" + job: "{{retirement_service_home}}/retire_users.sh" + hour: "{{ RETIREMENT_SERVICE_CRON_JOB_HOURS }}" + minute: "{{ RETIREMENT_SERVICE_CRON_JOB_MINUTES }}" + day: "*" + when: RETIREMENT_SERVICE_ENABLE_CRON_JOB diff --git a/playbooks/roles/user_retirement_pipeline/templates/config.yml.j2 b/playbooks/roles/user_retirement_pipeline/templates/config.yml.j2 new file mode 100644 index 00000000000..31a6eedf18a --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/templates/config.yml.j2 @@ -0,0 +1,12 @@ +client_id: {{ RETIREMENT_SERVICE_EDX_OAUTH2_KEY }} +client_secret: {{ RETIREMENT_SERVICE_EDX_OAUTH2_SECRET }} + +base_urls: + lms: {{ RETIREMENT_LMS_BASE_URL }} + ecommerce: {{ RETIREMENT_ECOMMERCE_BASE_BASE_URL }} + credentials: {{ RETIREMENT_CREDENTIALS_BASE_URL }} + +retirement_pipeline: + {% for item in RETIREMENT_SERVICE_PIPELINE_CONFIGURATION %} + - ['{{ item.NAME }}', '{{ item.NAME_COMPLETE }}', '{{ item.SERVICE }}', '{{ item.FUNCTION }}'] + {% endfor %} diff --git a/playbooks/roles/user_retirement_pipeline/templates/retire_users.sh.j2 b/playbooks/roles/user_retirement_pipeline/templates/retire_users.sh.j2 new file mode 100644 index 00000000000..d9441b20ef8 --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/templates/retire_users.sh.j2 @@ -0,0 +1,43 @@ +#!/bin/bash + +# Log to console and to file +LOG_LOCATION={{retirement_service_log_dir}} +TIMESTAMP=$(date -u) +exec > >(tee -i "$LOG_LOCATION/retirement-logs-[$TIMESTAMP].log") +exec 2>&1 +echo "Writing logs to: [ $LOG_LOCATION ]" + +# Ensure only one instance of the retirement script is running +LOCKFILE={{retirement_service_data_dir }}/retirement.lock +if [ -e ${LOCKFILE} 
] && kill -0 `cat ${LOCKFILE}`; then
+    echo "Retirement Pipeline already running!"
+    exit
+fi
+# make sure the lockfile is removed when we exit and then claim it
+trap "rm -f ${LOCKFILE}; exit" INT TERM EXIT
+echo $$ > ${LOCKFILE}
+
+# Source virtualenv
+source {{retirement_service_venv_dir}}/bin/activate
+
+# List users that are ready to be deleted, after the specified cool off period
+{{retirement_service_script_path}}/get_learners_to_retire.py \
+    --config_file={{retirement_service_home}}/config.yml \
+    --output_dir={{retirement_service_data_dir }}/learners_to_retire \
+    --cool_off_days={{RETIREMENT_SERVICE_COOL_OFF_DAYS}}
+
+# Loop over users and run deletion pipeline
+unset RETIREMENT_USERNAME
+for filename in {{retirement_service_data_dir }}/learners_to_retire/*; do
+    [ -e "$filename" ] || continue
+    . $filename
+    echo "Deleting user: $RETIREMENT_USERNAME..."
+    {{retirement_service_script_path}}/retire_one_learner.py \
+        --config_file={{retirement_service_home}}/config.yml \
+        --username=$RETIREMENT_USERNAME
+    unset RETIREMENT_USERNAME
+    rm -f $filename
+done
+
+# Clear the lockfile
+rm -f $LOCKFILE
diff --git a/playbooks/roles/vhost/defaults/main.yml b/playbooks/roles/vhost/defaults/main.yml
new file mode 100644
index 00000000000..8c8fa266d6f
--- /dev/null
+++ b/playbooks/roles/vhost/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Defaults for role vhost
+#
+
+# Specify a name for vhost deployments, e.g. aws or openstack. Service files
+# specific to the vhost will be namespaced in directories with this name.
+VHOST_NAME: 'vhost'
+
+vhost_dirs:
+  home:
+    path: "{{ COMMON_APP_DIR }}/{{ VHOST_NAME }}"
+    owner: "root"
+    group: "root"
+    mode: "0755"
+  logs:
+    path: "{{ COMMON_LOG_DIR }}/{{ VHOST_NAME }}"
+    owner: "syslog"
+    group: "syslog"
+    mode: "0650"
+  data:
+    path: "{{ COMMON_DATA_DIR }}/{{ VHOST_NAME }}"
+    owner: "root"
+    group: "root"
+    mode: "0700"
diff --git a/playbooks/roles/vhost/meta/main.yml b/playbooks/roles/vhost/meta/main.yml
new file mode 100644
index 00000000000..932980a4d7e
--- /dev/null
+++ b/playbooks/roles/vhost/meta/main.yml
@@ -0,0 +1,14 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+##
+# Role includes for role vhost
+#
+dependencies:
+  - common
diff --git a/playbooks/roles/vhost/tasks/main.yml b/playbooks/roles/vhost/tasks/main.yml
new file mode 100644
index 00000000000..cb2eaa42d99
--- /dev/null
+++ b/playbooks/roles/vhost/tasks/main.yml
@@ -0,0 +1,154 @@
+---
+#
+# edX Configuration
+#
+# github: https://github.com/openedx/configuration
+# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
+# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
+# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT
+#
+#
+#
+# Tasks for role vhost
+#
+# Overview:
+#
+# This role contains tasks that should be run in vhost
+# virtualization environments like AWS and Vagrant, but not in
+# containers. You typically would not run this role
+# independently.
+#
+# Dependencies:
+# - common
+#
+
+- name: Create all service directories
+  file:
+    path: "{{ item.value.path }}"
+    state: directory
+    owner: "{{ item.value.owner }}"
+    group: "{{ item.value.group }}"
+    mode: "{{ item.value.mode }}"
+  with_dict: "{{ vhost_dirs }}"
+
+- name: Add script for syncing logs on exit
+  template:
+    src: sync-logs-on-exit.j2
+    dest: "{{ COMMON_OBJECT_STORE_LOG_SYNC_ON_EXIT }}"
+    owner: root
+    group: root
+    mode: 0744
+  when: COMMON_OBJECT_STORE_LOG_SYNC
+
+- name: Force logrotate on supervisor stop
+  template:
+    src: etc/init/sync-on-stop.conf.j2
+    dest: /etc/init/sync-on-stop.conf
+    owner: root
+    group: root
+    mode: 0644
+  when: COMMON_OBJECT_STORE_LOG_SYNC
+       and (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty')
+
+# With systemd, log rotation should be run before the `rsyslog` service exits
+# to ensure that all logs are compressed and synced before shutting down the server.
+- name: Add systemd service for syncing logs on exit
+  template:
+    src: etc/systemd/system/sync-logs-on-exit.service.j2
+    dest: /etc/systemd/system/sync-logs-on-exit.service
+    owner: root
+    group: root
+    mode: 0644
+  when: COMMON_OBJECT_STORE_LOG_SYNC
+       and not (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty')
+
+- name: Enable systemd service for syncing logs on exit
+  systemd:
+    name: sync-logs-on-exit
+    enabled: yes
+    daemon_reload: yes
+  when: COMMON_OBJECT_STORE_LOG_SYNC
+       and not (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty')
+
+- name: Set maximum disk space usage, free space, retention, and file age for systemd journal
+  lineinfile:
+    path: /etc/systemd/journald.conf
+    regexp: '^#?{{ item.regexp }}'
+    line: '{{ item.line }}'
+    state: present
+  with_items:
+    - { regexp: 'SystemMaxUse=', line: 'SystemMaxUse=500M' }
+    - { regexp: 'SystemKeepFree=', line: 'SystemKeepFree=1G' }
+    - { regexp: 'MaxRetentionSec=', line: 'MaxRetentionSec=1month' }
+    - { regexp: 'MaxFileSec=', line: 'MaxFileSec=1week' }
+  register: journald_config_line
+  when: ansible_distribution_release == 'bionic' or ansible_distribution_release == 'focal'
+
+- name: Restart systemd-journald (ubuntu/debian)
+  service:
+    name: systemd-journald
+    state: restarted
+  when: >
+    journald_config_line.changed and ansible_distribution in common_debian_variants
+
+- name: Update /etc/dhcp/dhclient.conf
+  template:
+    src: etc/dhcp/dhclient.conf.j2
+    dest: /etc/dhcp/dhclient.conf
+  when: COMMON_CUSTOM_DHCLIENT_CONFIG
+
+- name: Rerun dhclient to apply template
+  shell: dhclient -n
+  when: COMMON_CUSTOM_DHCLIENT_CONFIG
+
+- name: Copy the MOTD template in place
+  template:
+    src: etc/motd.tail.j2
+    dest: /etc/motd.tail
+    owner: root
+    group: root
+    mode: '755'
+
+- name: Add motd.tail support for 16.04
+  copy:
+    dest: "/etc/update-motd.d/75-motd-tail"
+    content: "#!/bin/sh\necho\ncat /etc/motd.tail\n"
+    force: true
+    owner: root
+    group: root
+    mode: "0755"
+  when: ansible_distribution_release == 'xenial'
+
+- name: Update sshd logging to VERBOSE
+  lineinfile:
+    dest: /etc/ssh/sshd_config
+    state: present
+    regexp: "^LogLevel .*"
+    line: "LogLevel VERBOSE"
+  register: sshd_config_line1
+
+- name: Update sshd PasswordAuthentication setting
+  lineinfile:
+    dest: /etc/ssh/sshd_config
+    state: present
+    regexp: "^PasswordAuthentication .*"
+    line: "PasswordAuthentication {{ COMMON_SSH_PASSWORD_AUTH }}"
+  register: sshd_config_line2
+
+- name: Restart ssh (ubuntu/debian)
+  service:
+ name: ssh + state: restarted + become: True + when: > + (sshd_config_line1.changed or sshd_config_line2.changed) and + ansible_distribution in common_debian_variants + +- name: Restart ssh (redhat) + service: + name: sshd + state: restarted + become: True + when: > + (sshd_config_line1.changed or sshd_config_line2.changed) and + ansible_distribution in common_redhat_variants diff --git a/playbooks/roles/common/templates/etc/dhcp/dhclient.conf.j2 b/playbooks/roles/vhost/templates/etc/dhcp/dhclient.conf.j2 similarity index 94% rename from playbooks/roles/common/templates/etc/dhcp/dhclient.conf.j2 rename to playbooks/roles/vhost/templates/etc/dhcp/dhclient.conf.j2 index 953c2c78076..2fb75dd35c8 100644 --- a/playbooks/roles/common/templates/etc/dhcp/dhclient.conf.j2 +++ b/playbooks/roles/vhost/templates/etc/dhcp/dhclient.conf.j2 @@ -56,7 +56,7 @@ request subnet-mask, broadcast-address, time-offset, routers, # expire 2 2000/1/12 00:00:01; #} -interface "eth0" { - prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{search}}"{%- if not loop.last -%},{%- endif -%} +interface "{{ ansible_default_ipv4.interface }}" { + prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{ search }}"{%- if not loop.last -%},{%- endif -%} {%- endfor -%}; } diff --git a/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 b/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 new file mode 100644 index 00000000000..f3117d47d21 --- /dev/null +++ b/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 @@ -0,0 +1,5 @@ +start on stopped supervisor +description "sync tracking logs on supervisor shutdown" +script + "{{ COMMON_OBJECT_STORE_LOG_SYNC_ON_EXIT }}" +end script diff --git a/playbooks/roles/vhost/templates/etc/motd.tail.j2 b/playbooks/roles/vhost/templates/etc/motd.tail.j2 new file mode 100644 index 00000000000..0e48a00b8fe --- /dev/null +++ b/playbooks/roles/vhost/templates/etc/motd.tail.j2 @@ -0,0 +1,18 @@ +******************************************************************* +* ___ _ __ __ * +* / _ \ _ __ ___ _ _ ___ __| |\ \/ / (R) * +* | |_| | '_ \ -_) ' \ / -_) _` | > < * +* \___/| .__/___|_|_| \___\__,_|/_/\_\ * +* |_| * +* * +* This system is for the use of authorized users only. Usage of * +* this system may be monitored and recorded by system personnel. * +* * +* Anyone using this system expressly consents to such monitoring * +* and is advised that if such monitoring reveals possible * +* evidence of criminal activity, system personnel may provide the * +* evidence from such monitoring to law enforcement officials. * +* * +* Need help? 
https://open.edx.org/getting-help * +* * +******************************************************************* diff --git a/playbooks/roles/vhost/templates/etc/systemd/system/sync-logs-on-exit.service.j2 b/playbooks/roles/vhost/templates/etc/systemd/system/sync-logs-on-exit.service.j2 new file mode 100644 index 00000000000..7e171484104 --- /dev/null +++ b/playbooks/roles/vhost/templates/etc/systemd/system/sync-logs-on-exit.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=Synchronizes logs on exit +After=rsyslog.service + +[Service] +Type=oneshot +RemainAfterExit=true +ExecStop=/edx/bin/sync-logs-on-exit + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/vhost/templates/sync-logs-on-exit.j2 b/playbooks/roles/vhost/templates/sync-logs-on-exit.j2 new file mode 100644 index 00000000000..9629744538a --- /dev/null +++ b/playbooks/roles/vhost/templates/sync-logs-on-exit.j2 @@ -0,0 +1,4 @@ +#!/bin/bash + +/usr/sbin/logrotate -f /etc/logrotate.d/hourly/tracking.log +/usr/sbin/logrotate -f /etc/logrotate.d/hourly/edx-services diff --git a/playbooks/roles/xqueue/defaults/main.yml b/playbooks/roles/xqueue/defaults/main.yml index 50cb31da62e..2fa2f610398 100644 --- a/playbooks/roles/xqueue/defaults/main.yml +++ b/playbooks/roles/xqueue/defaults/main.yml @@ -1,21 +1,12 @@ # variables common to the xqueue role, automatically loaded # when the role is included --- -XQUEUE_NGINX_PORT: 18040 +XQUEUE_SETTINGS: 'production' -xqueue_app_dir: "{{ COMMON_APP_DIR }}/xqueue" -xqueue_code_dir: "{{ xqueue_app_dir }}/xqueue" -xqueue_data_dir: "{{ COMMON_DATA_DIR }}/xqueue" -xqueue_venvs_dir: "{{ xqueue_app_dir }}/venvs" -xqueue_venv_dir: "{{ xqueue_venvs_dir }}/xqueue" -xqueue_venv_bin: "{{ xqueue_venv_dir }}/bin" -xqueue_user: "xqueue" - -# Default nginx listen port -# These should be overrided if you want -# to serve all content on port 80 -xqueue_gunicorn_port: 8040 -xqueue_gunicorn_host: 127.0.0.1 +XQUEUE_NGINX_PORT: 18040 +XQUEUE_NGINX_SSL_PORT: 18041 +XQUEUE_GUNICORN_WORKERS_EXTRA: "" +XQUEUE_GUNICORN_WORKERS_EXTRA_CONF: "" XQUEUE_QUEUES: # push queue @@ -24,20 +15,37 @@ XQUEUE_QUEUES: 'test-pull': !!null 'certificates': !!null 'open-ended': !!null + 'open-ended-message': !!null + XQUEUE_LOGGING_ENV: sandbox XQUEUE_SYSLOG_SERVER: 'localhost' -XQUEUE_S3_BUCKET : 'sandbox-bucket' -XQUEUE_S3_PATH_PREFIX: 'sandbox-xqueue' + +XQUEUE_UPLOAD_BUCKET: 'sandbox-bucket' +XQUEUE_UPLOAD_PATH_PREFIX: 'sandbox-xqueue' +# You can set this to S3 or Swift, but it will +# default to the django file storage for tests/devstacks +XQUEUE_DEFAULT_FILE_STORAGE: !!null + XQUEUE_LOCAL_LOGLEVEL: 'INFO' -XQUEUE_AWS_ACCESS_KEY_ID : '' -XQUEUE_AWS_SECRET_ACCESS_KEY : '' -XQUEUE_BASIC_AUTH_USER: 'edx' -XQUEUE_BASIC_AUTH_PASSWORD: 'edx' + +# If you try to use an Instance IAM Role rather than a user key S3 will invalidate the signed +# URLs used in uploaded file submissions. If you don't upload files on grade submissions, then +# it's ok, the submission is stored directly in the database. 
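+#
+# For illustration, a hedged sketch of an S3-backed upload setup using a user
+# key (the bucket/prefix values and the django-storages backend path are
+# assumptions for this example, not defaults of this role):
+#
+#   XQUEUE_DEFAULT_FILE_STORAGE: 'storages.backends.s3boto.S3BotoStorage'
+#   XQUEUE_UPLOAD_BUCKET: 'my-xqueue-uploads'
+#   XQUEUE_UPLOAD_PATH_PREFIX: 'prod-xqueue'
+#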
+XQUEUE_AWS_ACCESS_KEY_ID : !!null
+XQUEUE_AWS_SECRET_ACCESS_KEY : !!null
+
+XQUEUE_SWIFT_USERNAME: !!null
+XQUEUE_SWIFT_KEY: !!null
+XQUEUE_SWIFT_TENANT_ID: !!null
+XQUEUE_SWIFT_TENANT_NAME: !!null
+XQUEUE_SWIFT_AUTH_URL: !!null
+XQUEUE_SWIFT_AUTH_VERSION: !!null
+XQUEUE_SWIFT_REGION_NAME: !!null
+
+XQUEUE_BASIC_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}"
+XQUEUE_BASIC_AUTH_PASSWORD: "{{ COMMON_HTPASSWD_PASS }}"
 XQUEUE_DJANGO_USERS:
-  lms: 'password'
-XQUEUE_RABBITMQ_USER: 'edx'
-XQUEUE_RABBITMQ_PASS: 'edx'
-XQUEUE_RABBITMQ_HOSTNAME: 'localhost'
+  lms: "{{ COMMON_XQUEUE_LMS_PASSWORD }}"
 XQUEUE_LANG: 'en_US.UTF-8'
 XQUEUE_MYSQL_DB_NAME: 'xqueue'
@@ -45,39 +53,121 @@ XQUEUE_MYSQL_USER: 'xqueue001'
 XQUEUE_MYSQL_PASSWORD: 'password'
 XQUEUE_MYSQL_HOST: 'localhost'
 XQUEUE_MYSQL_PORT: '3306'
+XQUEUE_MYSQL_OPTIONS: {}
+XQUEUE_MYSQL_CONN_MAX_AGE: 0 # This is Django's default https://docs.djangoproject.com/en/1.11/ref/settings/#conn-max-age
+
+XQUEUE_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-xqueue"
+XQUEUE_CONSUMER_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-xqueue_consumer"
+
+XQUEUE_CONSUMER_DATADOG_APPNAME: "xqueue_consumer"
+XQUEUE_DATADOG_APPNAME: "{{ xqueue_service_name }}"
+
+# Set the number of gunicorn front end workers explicitly for xqueue
+XQUEUE_WORKERS: !!null
+
+XQUEUE_SESSION_ENGINE: !!null
+XQUEUE_CACHES: !!null
+
+# Number of minutes to ignore a pulled/pushed submission before making it available again
+XQUEUE_SUBMISSION_PROCESSING_DELAY: 1
+# Number of seconds to delay before querying for available push submissions
+XQUEUE_CONSUMER_DELAY: 10
-xqueue_env_config:
-  XQUEUES: $XQUEUE_QUEUES
-  XQUEUE_WORKERS_PER_QUEUE: 12
-  LOGGING_ENV : $XQUEUE_LOGGING_ENV
-  SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER
-  LOG_DIR : "{{ COMMON_DATA_DIR }}/logs/xqueue"
-  RABBIT_HOST : $XQUEUE_RABBITMQ_HOSTNAME
-  S3_BUCKET : $XQUEUE_S3_BUCKET
-  S3_PATH_PREFIX: $XQUEUE_S3_PATH_PREFIX
-  LOCAL_LOGLEVEL: $XQUEUE_LOCAL_LOGLEVEL
-
-xqueue_auth_config:
-  AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID
-  AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY
-  REQUESTS_BASIC_AUTH: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD]
-  USERS: $XQUEUE_DJANGO_USERS
+XQUEUE_CSRF_COOKIE_SECURE: false
+XQUEUE_SESSION_COOKIE_SECURE: false
+
+# This dictionary is defined in XQueue's settings.py.
+# If you want to set up cloudwatch metrics/alarms, this allows
+# you to supply a custom setting.
+XQUEUE_CLOUDWATCH_QUEUE_COUNT_METRICS: !!null
+
+# Remote config
+XQUEUE_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}"
+
+# This block of config is dropped into /edx/etc/xqueue.yml
+# and is read in by xqueue.XQUEUE_SETTINGS
+XQUEUE_CONFIG:
+  XQUEUES: "{{ XQUEUE_QUEUES }}"
+  LOGGING_ENV: "{{ XQUEUE_LOGGING_ENV }}"
+  SYSLOG_SERVER: "{{ XQUEUE_SYSLOG_SERVER }}"
+  LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/xqueue"
+  SESSION_ENGINE: "{{ XQUEUE_SESSION_ENGINE }}"
+  CACHES: "{{ XQUEUE_CACHES }}"
+  LOCAL_LOGLEVEL: "{{ XQUEUE_LOCAL_LOGLEVEL }}"
+  UPLOAD_BUCKET: "{{ XQUEUE_UPLOAD_BUCKET }}"
+  UPLOAD_PATH_PREFIX: "{{ XQUEUE_UPLOAD_PATH_PREFIX }}"
+  DEFAULT_FILE_STORAGE: "{{ XQUEUE_DEFAULT_FILE_STORAGE }}"
+  NEWRELIC_APPNAME: "{{ XQUEUE_NEWRELIC_APPNAME }}"
+  SUBMISSION_PROCESSING_DELAY: "{{ XQUEUE_SUBMISSION_PROCESSING_DELAY }}"
+  CONSUMER_DELAY: "{{ XQUEUE_CONSUMER_DELAY }}"
+  AWS_ACCESS_KEY_ID: "{{ XQUEUE_AWS_ACCESS_KEY_ID }}"
+  AWS_SECRET_ACCESS_KEY: "{{ XQUEUE_AWS_SECRET_ACCESS_KEY }}"
+  SWIFT_USERNAME: "{{ XQUEUE_SWIFT_USERNAME }}"
+  SWIFT_KEY: "{{ XQUEUE_SWIFT_KEY }}"
+  SWIFT_TENANT_ID: "{{ XQUEUE_SWIFT_TENANT_ID }}"
+  SWIFT_TENANT_NAME: "{{ XQUEUE_SWIFT_TENANT_NAME }}"
+  SWIFT_AUTH_URL: "{{ XQUEUE_SWIFT_AUTH_URL }}"
+  SWIFT_AUTH_VERSION: "{{ XQUEUE_SWIFT_AUTH_VERSION }}"
+  SWIFT_REGION_NAME: "{{ XQUEUE_SWIFT_REGION_NAME }}"
+  # This is used by the xqueue consumer in case it needs to send a password-protected submission out for a push grader.
+  REQUESTS_BASIC_AUTH: ["{{ XQUEUE_BASIC_AUTH_USER }}", "{{XQUEUE_BASIC_AUTH_PASSWORD}}"]
+  USERS: "{{ XQUEUE_DJANGO_USERS }}"
   DATABASES:
     default:
       ENGINE: "django.db.backends.mysql"
-      NAME: $XQUEUE_MYSQL_DB_NAME
-      USER: $XQUEUE_MYSQL_USER
-      PASSWORD: $XQUEUE_MYSQL_PASSWORD
-      HOST: $XQUEUE_MYSQL_HOST
-      PORT: $XQUEUE_MYSQL_PORT
-  RABBITMQ_USER: $XQUEUE_RABBITMQ_USER
-  RABBITMQ_PASS: $XQUEUE_RABBITMQ_PASS
+      NAME: "{{ XQUEUE_MYSQL_DB_NAME }}"
+      USER: "{{ XQUEUE_MYSQL_USER }}"
+      PASSWORD: "{{ XQUEUE_MYSQL_PASSWORD }}"
+      HOST: "{{ XQUEUE_MYSQL_HOST }}"
+      PORT: "{{ XQUEUE_MYSQL_PORT }}"
+      ATOMIC_REQUESTS: True
+      CONN_MAX_AGE: "{{ XQUEUE_MYSQL_CONN_MAX_AGE }}"
+      OPTIONS: "{{ XQUEUE_MYSQL_OPTIONS }}"
+  NEWRELIC_LICENSE_KEY: "{{ NEWRELIC_LICENSE_KEY | default('') }}"
+  CLOUDWATCH_QUEUE_COUNT_METRICS: "{{ XQUEUE_CLOUDWATCH_QUEUE_COUNT_METRICS }}"
+  SESSION_COOKIE_SECURE: "{{ XQUEUE_SESSION_COOKIE_SECURE }}"
+  CSRF_COOKIE_SECURE: "{{ XQUEUE_CSRF_COOKIE_SECURE }}"
+
+XQUEUE_VERSION: "master"
+XQUEUE_GIT_IDENTITY: !!null
-xqueue_source_repo: https://github.com/edx/xqueue.git
-xqueue_version: 'HEAD'
-xqueue_pre_requirements_file: "{{ xqueue_code_dir }}/pre-requirements.txt"
-xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
+XQUEUE_REPOS:
+  - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}"
+    DOMAIN: "{{ COMMON_GIT_MIRROR }}"
+    PATH: "{{ COMMON_GIT_PATH }}"
+    REPO: xqueue.git
+    VERSION: "{{ XQUEUE_VERSION }}"
+    DESTINATION: "{{ xqueue_code_dir }}"
+    SSH_KEY: "{{ XQUEUE_GIT_IDENTITY }}"
+# Internal vars below this line
+#############################################
+
+xqueue_service_name: "xqueue"
+
+xqueue_app_dir: "{{ COMMON_APP_DIR }}/xqueue"
+xqueue_code_dir: "{{ xqueue_app_dir }}/xqueue"
+xqueue_data_dir: "{{ COMMON_DATA_DIR }}/xqueue"
+xqueue_venvs_dir: "{{ xqueue_app_dir }}/venvs"
+xqueue_venv_dir: "{{ xqueue_venvs_dir }}/xqueue"
+xqueue_venv_bin: "{{ xqueue_venv_dir }}/bin"
+
+xqueue_user: "xqueue"
+xqueue_home: "{{ COMMON_APP_DIR }}/{{ xqueue_service_name }}"
+
+# Default nginx listen port
+# These should be overridden if you want
+# to serve all content on port 80
+xqueue_gunicorn_port: 8040
+xqueue_gunicorn_host: 127.0.0.1
+
+xqueue_environment:
+  DJANGO_SETTINGS_MODULE: 'xqueue.{{ XQUEUE_SETTINGS }}'
+  PATH: '{{ xqueue_venv_bin }}:{{ ansible_env.PATH }}'
+  XQUEUE_CFG: '{{ COMMON_CFG_DIR }}/xqueue.yml'
+
+xqueue_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
+xqueue_openstack_requirements_file: "{{ xqueue_code_dir }}/openstack-requirements.txt"

 # These packages are required for the xqueue server,
 # copied from the LMS role for now since there is a lot
@@ -87,18 +177,24 @@ xqueue_debian_pkgs:
   # (only needed if wheel files aren't available)
   - build-essential
   - s3cmd
-  - pkg-config
-  - graphviz-dev
-  - graphviz
+  # mysqlclient won't install without this
+  - libssl-dev
   - libmysqlclient-dev
-  # apparmor
-  - apparmor-utils
-  # misc
-  - curl
-  - ipython
-  - npm
   - ntp
-  # for shapely
-  - libgeos-dev
   # Needed to be able to create the xqueue mysqldb.
-  - python-mysqldb
+  # Needed to be able to build wheel for mysqlclient
+  - python3-dev
+  # Needed for the mysqlclient==2.2.0 python package
+  - pkg-config
+xqueue_release_specific_debian_pkgs:
+  xenial:
+    - python-mysqldb
+  bionic:
+    - python-mysqldb
+  focal:
+    - python3-mysqldb
+
+# flag to run xqueue on python3
+xqueue_use_python3: false
+# flag to run xqueue on python3.8
+xqueue_use_python38: true
diff --git a/playbooks/roles/xqueue/handlers/main.yml b/playbooks/roles/xqueue/handlers/main.yml
deleted file mode 100644
index 4ad8abd881e..00000000000
--- a/playbooks/roles/xqueue/handlers/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: restart xqueue
-  supervisorctl_local: >
-    name={{ item }}
-    supervisorctl_path={{ supervisor_ctl }}
-    config={{ supervisor_cfg }}
-    state=restarted
-  when: xqueue_installed is defined
-  with_items:
-    - xqueue
-    - xqueue_consumer
diff --git a/playbooks/roles/xqueue/meta/main.yml b/playbooks/roles/xqueue/meta/main.yml
index 107f1e98c29..bf4808bc61d 100644
--- a/playbooks/roles/xqueue/meta/main.yml
+++ b/playbooks/roles/xqueue/meta/main.yml
@@ -1,3 +1,21 @@
 ---
 dependencies:
+  - common
   - supervisor
+  - role: supervisor
+    supervisor_spec:
+      - service: "{{ xqueue_service_name }}"
+        migration_check_services: "{{ xqueue_service_name }}"
+        python: "{{ xqueue_venv_bin }}/python"
+        code: "{{ xqueue_code_dir | default(None) }}"
+        env: "{{ xqueue_app_dir | default(none) }}/xqueue_env"
+  - role: edx_service_with_rendered_config
+    edx_service_with_rendered_config_service_name: "{{ xqueue_service_name }}"
+    edx_service_with_rendered_config_service_config: "{{ XQUEUE_CONFIG }}"
+    edx_service_with_rendered_config_filter_nones: true
+    edx_service_with_rendered_config_repos: "{{ XQUEUE_REPOS }}"
+    edx_service_with_rendered_config_user: "{{ xqueue_user }}"
+    edx_service_with_rendered_config_home: "{{ xqueue_home }}"
+    edx_service_with_rendered_config_packages:
+      debian: "{{ xqueue_debian_pkgs + xqueue_release_specific_debian_pkgs[ansible_distribution_release] }}"
+      redhat: []
diff --git a/playbooks/roles/xqueue/tasks/deploy.yml b/playbooks/roles/xqueue/tasks/deploy.yml
deleted file mode 100644
index 7270c86a631..00000000000
--- a/playbooks/roles/xqueue/tasks/deploy.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-- name: "writing supervisor scripts - xqueue, xqueue consumer"
-  template: >
-    src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
-    owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
-  with_items: ['xqueue', 'xqueue_consumer']
-
-
-- name: create xqueue application config
-  template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json
mode=0644 - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - -- name: create xqueue auth file - template: src=xqueue.auth.json.j2 dest={{ xqueue_app_dir }}/xqueue.auth.json mode=0644 - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - -# Do A Checkout -- name: git checkout xqueue repo into xqueue_code_dir - git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }} - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - - -# Install the python pre requirements into {{ xqueue_venv_dir }} -- name : install python pre-requirements - pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - -# Install the python post requirements into {{ xqueue_venv_dir }} -- name : install python post-requirements - pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - -- name: syncdb and migrate - shell: > - SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} - when: migrate_db is defined and migrate_db|lower == "yes" - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - -- name: create users - shell: > - SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} - sudo_user: "{{ xqueue_user }}" - notify: - - restart xqueue - - # call supervisorctl update. this reloads - # the supervisorctl config and restarts - # the services if any of the configurations - # have changed. - # -- name: update supervisor configuration - shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" - register: supervisor_update - changed_when: supervisor_update.stdout != "" - -- name: ensure xqueue, consumer is running - supervisorctl_local: > - name={{ item }} - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - with_items: - - xqueue - - xqueue_consumer - -- name: create a symlink for venv python - file: > - src="http://github.com/%7B%7B%20xqueue_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.xqueue - state=link - with_items: - - python - - pip - -- set_fact: xqueue_installed=true diff --git a/playbooks/roles/xqueue/tasks/main.yml b/playbooks/roles/xqueue/tasks/main.yml index 0666c40b7ac..8fc4676a1eb 100644 --- a/playbooks/roles/xqueue/tasks/main.yml +++ b/playbooks/roles/xqueue/tasks/main.yml @@ -1,39 +1,276 @@ -# requires: -# - group_vars/all -# - common/tasks/main.yml --- -# Check out xqueue repo to {{xqueue_code_dir}} -# -# +######## BEGIN PYTHON3 ######## +- name: add deadsnakes repo + apt_repository: + repo: ppa:deadsnakes/ppa + update_cache: yes + when: xqueue_use_python38 -- name: create application user - user: > - name="{{ xqueue_user }}" - home="{{ xqueue_app_dir }}" - createhome=no - shell=/bin/false - notify: - - restart xqueue +- name: install python3.8 + apt: + name: "{{ item }}" + when: xqueue_use_python38 + with_items: + - python3.8-dev + - python3.8-distutils + tags: + - install + - install:system-requirements + +- name: install python3 + apt: + name: "{{ item }}" + when: xqueue_use_python3 + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3.8 + command: "virtualenv 
--python=python3.8 {{ xqueue_venv_dir }}" + args: + creates: "{{ xqueue_venv_dir }}/bin/pip" + become_user: "{{ xqueue_user }}" + when: xqueue_use_python38 + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=python3 {{ xqueue_venv_dir }}" + args: + creates: "{{ xqueue_venv_dir }}/bin/pip" + become_user: "{{ xqueue_user }}" + when: xqueue_use_python3 + tags: + - install + - install:system-requirements + +- name: "Install python3.8 requirements" + pip: + requirements: "{{ xqueue_requirements_file }}" + virtualenv: "{{ xqueue_venv_dir }}" + virtualenv_python: 'python3.8' + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + become_user: "{{ xqueue_user }}" + when: xqueue_use_python38 + tags: + - install + - install:app-requirements + +- name: "Install python3 requirements" + pip: + requirements: "{{ xqueue_requirements_file }}" + virtualenv: "{{ xqueue_venv_dir }}" + virtualenv_python: 'python3.5' + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + become_user: "{{ xqueue_user }}" + when: xqueue_use_python3 + tags: + - install + - install:app-requirements + +- name: "Install Datadog APM requirements" + when: COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP + pip: + name: + - ddtrace + extra_args: "--exists-action w" + virtualenv: "{{ xqueue_venv_dir }}" + state: present + become_user: "{{ xqueue_user }}" + tags: + - install + - install:app-requirements + +######## END PYTHON3 ######## + +- name: build virtualenv with python2.7 + command: "virtualenv --python=python2.7 {{ xqueue_venv_dir }}" + args: + creates: "{{ xqueue_venv_dir }}/bin/pip" + become_user: "{{ xqueue_user }}" + when: not xqueue_use_python3 and not xqueue_use_python38 + tags: + - install + - install:system-requirements + +- name: write devstack script + template: + src: "devstack.sh.j2" + dest: "{{ xqueue_app_dir }}/devstack.sh" + owner: "{{ xqueue_user }}" + group: "{{ common_web_group }}" + mode: 0744 + when: devstack is defined and devstack + tags: + - devstack + - devstack:install -- name: create xqueue app and venv dir - file: > - path="{{ item }}" - state=directory - owner="{{ xqueue_user }}" - group="{{ common_web_group }}" - notify: - - restart xqueue +- name: "Writing supervisor scripts - xqueue, xqueue consumer" + template: + src: "{{ item }}.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ item }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: "0644" with_items: - - "{{ xqueue_app_dir }}" - - "{{ xqueue_venvs_dir }}" + - xqueue + - xqueue_consumer + tags: + - install + - install:configuration -- name: install a bunch of system packages on which xqueue relies - apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present - notify: - - restart xqueue +- name: "Enabling supervisor scripts - xqueue, xqueue consumer" + file: + src: "{{ supervisor_available_dir }}/{{ item }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ item }}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: "0644" + state: link + force: yes + with_items: + - xqueue + - xqueue_consumer + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: add gunicorn configuration file + template: + src: "xqueue_gunicorn.py.j2" + dest: "{{ xqueue_app_dir }}/xqueue_gunicorn.py" + become_user: "{{ xqueue_user }}" + tags: + - install + - install:configuration + +- name: setup the app env file + template: + src: "xqueue_env.j2" + dest: "{{ 
xqueue_app_dir }}/xqueue_env" + owner: "{{ xqueue_user }}" + group: "{{ common_web_group }}" + mode: 0644 + tags: + - install + - install:configuration -- include: deploy.yml tags=deploy +# Install the python requirements into {{ xqueue_venv_dir }} +- name: "Install python requirements" + pip: + requirements: "{{ xqueue_requirements_file }}" + virtualenv: "{{ xqueue_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + become_user: "{{ xqueue_user }}" + when: not xqueue_use_python3 and not xqueue_use_python38 + tags: + - install + - install:app-requirements +# If this is an openstack deployment, install openstack requirements +- name: Install python openstack requirements + pip: + requirements: "{{ xqueue_openstack_requirements_file }}" + virtualenv: "{{ xqueue_venv_dir }}" + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + become_user: "{{ xqueue_user }}" + when: XQUEUE_SETTINGS == 'openstack_settings' + tags: + - install + - install:app-requirements +# If there is a common user for migrations, run migrations using that user's name +# and credentials. If not, we use the xqueue mysql user +- name: Migrate + shell: "{{ xqueue_venv_bin }}/django-admin migrate --noinput --settings=xqueue.{{ XQUEUE_SETTINGS }} --pythonpath={{ xqueue_code_dir }}" + become_user: "{{ xqueue_user }}" + environment: + DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}" + DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}" + XQUEUE_CFG: '{{ COMMON_CFG_DIR }}/xqueue.yml' + when: migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS + run_once: yes + tags: + - migrate + - migrate:db +- name: Create users + shell: "{{ xqueue_venv_bin }}/django-admin update_users --settings=xqueue.{{ XQUEUE_SETTINGS }} --pythonpath={{ xqueue_code_dir }}" + become_user: "{{ xqueue_user }}" + environment: + XQUEUE_CFG: '{{ COMMON_CFG_DIR }}/xqueue.yml' + when: not disable_edx_services + tags: + - manage + - manage:app-users + + # call supervisorctl update. this reloads + # the supervisorctl config and restarts + # the services if any of the configurations + # have changed.
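 + # (Illustrative note: a no-op update prints nothing, while an update that applied a + # changed config prints per-program output along the lines of "xqueue: updated + # process group"; the changed_when test on stdout below keys off exactly that.)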
+ # +- name: Update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + register: supervisor_update + changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" + when: not disable_edx_services + +- name: Ensure xqueue, consumer is running + supervisorctl: + name: "{{ item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + with_items: + - xqueue + - xqueue_consumer + when: not disable_edx_services + tags: + - manage + - manage:start +- name: Create a symlink for venv commands + file: + src: "{{ xqueue_venv_bin }}/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item }}.xqueue" + state: link + with_items: + - python + - pip + tags: + - install + - install:app-requirements + +- name: Create symlinks for repo commands + file: + src: "{{ xqueue_code_dir }}/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.xqueue" + state: link + with_items: + - manage.py + tags: + - install + - install:app-requirements + +- name: Restart xqueue + supervisorctl: + name: "{{ item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: not disable_edx_services + with_items: + - xqueue + - xqueue_consumer + tags: + - manage + - manage:start diff --git a/playbooks/roles/xqueue/templates/devstack.sh.j2 b/playbooks/roles/xqueue/templates/devstack.sh.j2 new file mode 100644 index 00000000000..31b6eb4b9b3 --- /dev/null +++ b/playbooks/roles/xqueue/templates/devstack.sh.j2 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ xqueue_venv_bin }}/activate + cd {{ xqueue_code_dir }} + + /bin/bash + ;; + exec) + shift + + . 
{{ xqueue_venv_bin }}/activate + cd {{ xqueue_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/xqueue/templates/xqueue.auth.json.j2 b/playbooks/roles/xqueue/templates/xqueue.auth.json.j2 deleted file mode 100644 index 1e65295aac5..00000000000 --- a/playbooks/roles/xqueue/templates/xqueue.auth.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ xqueue_auth_config | to_nice_json }} diff --git a/playbooks/roles/xqueue/templates/xqueue.conf.j2 b/playbooks/roles/xqueue/templates/xqueue.conf.j2 index 1cdc0de846d..2eeb446058f 100644 --- a/playbooks/roles/xqueue/templates/xqueue.conf.j2 +++ b/playbooks/roles/xqueue/templates/xqueue.conf.j2 @@ -1,17 +1,25 @@ [program:xqueue] -{% if ansible_processor|length > 0 %} -command={{ xqueue_venv_bin }}/gunicorn --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = xqueue_venv_bin + '/newrelic-admin run-program ' + xqueue_venv_bin + '/gunicorn' %} {% else %} -command={{ xqueue_venv_bin }}/gunicorn --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi +{% set executable = xqueue_venv_bin + '/gunicorn' %} {% endif %} +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = xqueue_venv_bin + '/ddtrace-run ' + executable %} +{% endif -%} + +command={{ executable }} -c {{ xqueue_app_dir }}/xqueue_gunicorn.py {{ XQUEUE_GUNICORN_WORKERS_EXTRA }} xqueue.wsgi + user={{ common_web_user }} directory={{ xqueue_code_dir }} -environment=PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue" +# Copied DD_TRACE_LOG_STREAM_HANDLER config from edx_django_service. This is required +# to disable Datadog trace debug logging. 
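+# The environment= value below is a single comma-separated list of KEY=value pairs; +# with the role defaults it renders roughly as (illustrative, not verbatim) +# PID=/var/tmp/xqueue.pid,PORT=8040,ADDRESS=127.0.0.1,... plus the APM entries when enabled.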
+environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ XQUEUE_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %}DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true,DD_TAGS=service:{{ XQUEUE_DATADOG_APPNAME }},DD_TRACE_LOG_STREAM_HANDLER=false,{% endif -%}PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.{{ XQUEUE_SETTINGS }},XQUEUE_CFG={{ COMMON_CFG_DIR }}/xqueue.yml -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log killasgroup=true stopasgroup=true diff --git a/playbooks/roles/xqueue/templates/xqueue.env.json.j2 b/playbooks/roles/xqueue/templates/xqueue.env.json.j2 deleted file mode 100644 index c6fb539360c..00000000000 --- a/playbooks/roles/xqueue/templates/xqueue.env.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ xqueue_env_config | to_nice_json }} diff --git a/playbooks/roles/xqueue/templates/xqueue_consumer.conf.j2 b/playbooks/roles/xqueue/templates/xqueue_consumer.conf.j2 index d93f25e5d08..7d7f951641a 100644 --- a/playbooks/roles/xqueue/templates/xqueue_consumer.conf.j2 +++ b/playbooks/roles/xqueue/templates/xqueue_consumer.conf.j2 @@ -1,14 +1,26 @@ [program:xqueue_consumer] -command={{xqueue_venv_bin}}/django-admin.py run_consumer --pythonpath={{xqueue_code_dir}} --settings=xqueue.aws_settings $WORKERS_PER_QUEUE +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = xqueue_venv_bin + '/newrelic-admin run-program ' + xqueue_venv_bin + '/django-admin run_consumer' %} +{% else %} +{% set executable = xqueue_venv_bin + '/django-admin run_consumer' %} +{% endif %} + +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = xqueue_venv_bin + '/ddtrace-run ' + executable %} +{% endif -%} + +command={{ executable }} --pythonpath={{ xqueue_code_dir }} --settings=xqueue.{{ XQUEUE_SETTINGS }} user={{ common_web_user }} directory={{ xqueue_code_dir }} -environment=LANG={{ XQUEUE_LANG }},WORKERS_PER_QUEUE={{xqueue_env_config.XQUEUE_WORKERS_PER_QUEUE}},SERVICE_VARIANT="xqueue" +# Copied DD_TRACE_LOG_STREAM_HANDLER config from edx_django_service. This is required +# to disable Datadog trace debug logging. 
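+# Note that, unlike the xqueue web process, the consumer sets no PID/PORT/ADDRESS +# here; it only needs LANG and XQUEUE_CFG, plus the optional New Relic/Datadog variables.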
+environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_STARTUP_TIMEOUT=10,NEW_RELIC_APP_NAME={{ XQUEUE_CONSUMER_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %}DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true,DD_TAGS=service:{{ XQUEUE_CONSUMER_DATADOG_APPNAME }},DD_TRACE_LOG_STREAM_HANDLER=false,{% endif -%}LANG={{ XQUEUE_LANG }},XQUEUE_CFG={{ COMMON_CFG_DIR }}/xqueue.yml -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log killasgroup=true stopasgroup=true startsecs=0 diff --git a/playbooks/roles/xqueue/templates/xqueue_env.j2 b/playbooks/roles/xqueue/templates/xqueue_env.j2 new file mode 100644 index 00000000000..3ce4a2de08d --- /dev/null +++ b/playbooks/roles/xqueue/templates/xqueue_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in xqueue_environment.items() -%} +{%- if value -%} +export {{ name }}="{{ value }}" +{% endif %} +{%- endfor %} diff --git a/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 b/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 new file mode 100644 index 00000000000..4a50eb69182 --- /dev/null +++ b/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 @@ -0,0 +1,27 @@ +""" +gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html + +{{ ansible_managed }} +""" +import multiprocessing + +preload_app = True +timeout = 300 +bind = "{{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }}" +pythonpath = "{{ xqueue_code_dir }}" +limit_request_field_size = 16384 + +{% if XQUEUE_WORKERS %} +workers = {{ XQUEUE_WORKERS }} +{% else %} +workers = (multiprocessing.cpu_count()-1) * 2 + 2 +{% endif %} + +{{ common_pre_request }} + +{{ common_close_all_caches }} + +def post_fork(server, worker): + close_all_caches() + +{{ XQUEUE_GUNICORN_WORKERS_EXTRA_CONF }} diff --git a/playbooks/roles/xqwatcher/defaults/main.yml b/playbooks/roles/xqwatcher/defaults/main.yml new file mode 100644 index 00000000000..2bfe8760ee8 --- /dev/null +++ b/playbooks/roles/xqwatcher/defaults/main.yml @@ -0,0 +1,112 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role xqwatcher +# +# Courses can be defined by overriding XQWATCHER_COURSES with values like so: +# - COURSE: "exampleX-101x" +# GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git" +# GIT_REF: "master" +# PYTHON_EXECUTABLE: python2 +# PYTHON_REQUIREMENTS: [] +# QUEUE_NAME: "exampleX-101x" +# QUEUE_CONFIG: +# SERVER: "https://xqueue.example.com/" +# CONNECTIONS: 5 +# AUTH: ["user", "password"] +# HANDLERS: +# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader" +# CODEJAIL: +# name: "exampleX-101x" +# bin_path: "{{ xqwatcher_venv_base }}/exampleX-101x/bin/python" +# user: "exampleX-101x" +# lang: python2 +# KWARGS: +# grader_root: "../data/exampleX-101x/graders/" +# - COURSE: "exampleX-202x" +# GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git" +# GIT_REF: "master" +# PYTHON_EXECUTABLE: python3 +# PYTHON_REQUIREMENTS: [] +# QUEUE_NAME: "exampleX-202x" +# QUEUE_CONFIG: +# SERVER: "https://xqueue.example.com/" +# CONNECTIONS: 5 +# AUTH: ["user", "password"] +# HANDLERS: +# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader" +# CODEJAIL: +# name: "exampleX-202x" +# bin_path: "{{ xqwatcher_venv_base }}/exampleX-202x/bin/python" +# user: "exampleX-202x" +# lang: python2 +# KWARGS: +# grader_root: "../data/exampleX-202x/graders/" +# +# NB: only python2 and python3 are supported. + +XQWATCHER_CONFIG: + HTTP_BASIC_AUTH: ["{{ COMMON_HTPASSWD_USER }}","{{ COMMON_HTPASSWD_PASS }}"] + POLL_TIME: 10 + +XQWATCHER_COURSES: [] + +XQWATCHER_GIT_IDENTITY: "none" +XQWATCHER_VERSION: "master" + +XQWATCHER_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: "{{ xqwatcher_repo_name }}" + VERSION: "{{ XQWATCHER_VERSION }}" + DESTINATION: "{{ xqwatcher_code_dir }}" + SSH_KEY: "{{ XQWATCHER_GIT_IDENTITY }}" + +# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC +# and a key being provided via NEWRELIC_LICENSE_KEY +XQWATCHER_NEWRELIC_APPNAME: "{{ COMMON_DEPLOYMENT }}-{{ COMMON_ENVIRONMENT }}-xqwatcher" +XQWATCHER_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}" + +XQWATCHER_DATADOG_APPNAME: "{{ xqwatcher_service_name }}" +# +# +# vars are namespaced with the module name. +# +xqwatcher_service_name: "xqwatcher" +xqwatcher_user: "{{ xqwatcher_service_name }}" +xqwatcher_app_dir: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}" +xqwatcher_app_data: "{{ xqwatcher_app_dir }}/data" +xqwatcher_code_dir: "{{ xqwatcher_app_dir }}/src" + +xqwatcher_repo_name: xqueue-watcher.git + +xqwatcher_python_version: "python3.8" + +#TODO: change this to /edx/etc after pulling xqwatcher.json out +xqwatcher_conf_dir: "{{ xqwatcher_app_dir }}" + +xqwatcher_requirements_file: "{{ xqwatcher_code_dir }}/requirements/production.txt" +xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}" +xqwatcher_module: "xqueue_watcher" + +#Do not reference these outside of this file +xqwatcher_venv_base: "{{ xqwatcher_app_dir }}/venvs" +xqwatcher_venv_dir: "{{ xqwatcher_venv_base }}/{{ xqwatcher_service_name }}" + +# +# OS packages +# + +xqwatcher_debian_pkgs: + - apparmor + - apparmor-utils + +xqwatcher_redhat_pkgs: [] diff --git a/playbooks/roles/xqwatcher/meta/main.yml b/playbooks/roles/xqwatcher/meta/main.yml new file mode 100644 index 00000000000..90773791dc1 --- /dev/null +++ b/playbooks/roles/xqwatcher/meta/main.yml @@ -0,0 +1,25 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role xqwatcher +# +# the role name and service name differ by _ and -; the latter isn't safe in +# random corners of ansible/jinja/python variable expansion.
+dependencies: + - common + - role: supervisor + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ xqwatcher_service_name }}" + edx_service_with_rendered_config_repos: "{{ XQWATCHER_REPOS }}" + edx_service_with_rendered_config_user: "{{ xqwatcher_user }}" + edx_service_with_rendered_config_home: "{{ xqwatcher_app_dir }}" + edx_service_with_rendered_config_packages: + debian: "{{ xqwatcher_debian_pkgs }}" + redhat: "{{ xqwatcher_redhat_pkgs }}" diff --git a/playbooks/roles/xqwatcher/tasks/code_jail.yml b/playbooks/roles/xqwatcher/tasks/code_jail.yml new file mode 100644 index 00000000000..9ef714f5ef3 --- /dev/null +++ b/playbooks/roles/xqwatcher/tasks/code_jail.yml @@ -0,0 +1,136 @@ +--- +# +# Tasks related to deploying the code jail for the XQWatcher +# +- name: Create sandboxed user + user: + name: "{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}" + shell: /bin/false + home: "/dev/null" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:base + +# Do this first so symlinks can be resolved in the next step +- name: Create jail virtualenv + shell: "/usr/local/bin/virtualenv --python={{ item.PYTHON_EXECUTABLE }} {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }} --always-copy" + args: + creates: "{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/pip" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:code + +# +# Need to disable apparmor to update the virtualenv + +# When Apparmor is pointed at a symlink and an application uses the symlink +# target, Apparmor does not guard that execution. +# But when Apparmor is pointed at a real executable and an application uses a +# symlink to that executable, Apparmor DOES guard that execution. +- name: Resolve potential symlinks + shell: readlink -vf {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.bin_path }} + with_items: "{{ XQWATCHER_COURSES }}" + register: xqwatcher_python_real_executables + tags: + - install + - install:configuration + +- name: Write out apparmor config + template: + src: "etc/apparmor.d/code.jail.j2" + dest: "/etc/apparmor.d/code.jail.{{ item.0.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" + owner: root + group: root + mode: "0644" + with_together: + - "{{ XQWATCHER_COURSES }}" + - "{{ xqwatcher_python_real_executables.results }}" + tags: + - install + - install:configuration + +- name: Write out sudoers for watcher + template: + src: "etc/sudoers.d/95-xqwatcher.j2" + dest: "/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }}" + owner: root + group: root + mode: "0440" + validate: 'visudo -c -f %s' + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:configuration + +# see comment below as to why this is skipped.
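+# (For clarity: aa-complain loads a profile in complain mode, where violations are only +# logged, while aa-enforce makes AppArmor actually block them; the CODE_JAIL_COMPLAIN +# flag further down selects between the two.)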
+- name: Put code jail into aa-complain + command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - manage + - manage:sandbox + +- name: Write out requirements.txt + template: + src: "edx/app/xqwatcher/data/requirements.txt.j2" + dest: "{{ xqwatcher_app_dir }}/data/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt" + owner: root + group: root + mode: "0440" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:code + +- name: Install course specific python requirements + shell: "{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/pip install {{ XQWATCHER_PIP_EXTRA_ARGS }} -r {{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:code + +- name: Give other read permissions to the virtualenv + shell: "chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:code + +- name: Start apparmor service + service: + name: apparmor + state: started + tags: + - manage + - manage:sandbox + +- name: Load code sandbox profile + command: apparmor_parser -r "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - manage + - manage:sandbox + +# +# Leaves aa in either complain or enforce depending upon the value of the +# CODE_JAIL_COMPLAIN var. Complain mode should never be run in an +# environment where untrusted users can submit code +- name: Put code jail into aa-complain + command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" + when: CODE_JAIL_COMPLAIN|bool + with_items: "{{ XQWATCHER_COURSES }}" + # AA having issues on 14.04 + # https://github.com/openedx/codejail/issues/38 + tags: + - manage + - manage:sandbox + +- name: Put code sandbox into aa-enforce + command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" + when: not CODE_JAIL_COMPLAIN|bool + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - manage + - manage:sandbox diff --git a/playbooks/roles/xqwatcher/tasks/deploy.yml b/playbooks/roles/xqwatcher/tasks/deploy.yml new file mode 100644 index 00000000000..f8014ab598d --- /dev/null +++ b/playbooks/roles/xqwatcher/tasks/deploy.yml @@ -0,0 +1,33 @@ +- name: Install courses ssh key + copy: + content: "{{ XQWATCHER_GIT_IDENTITY }}" + dest: "{{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses" + owner: "{{ xqwatcher_user }}" + group: "{{ xqwatcher_user }}" + mode: "0600" + when: XQWATCHER_GIT_IDENTITY != 'none' + tags: + - deploy + - install + - install:code + +#TODO: remove once xqwatcher.json can be pulled out into /edx/etc/ +- name: Write out watcher config file + template: + src: "edx/app/xqwatcher/xqwatcher.json.j2" + dest: "{{ xqwatcher_conf_dir }}/xqwatcher.json" + owner: "{{ xqwatcher_user }}" + group: "{{ xqwatcher_user }}" + mode: "0644" + tags: + - deploy + - install + - install:configuration + +- include: deploy_watcher.yml + tags: + - deploy-watcher + +- include: deploy_courses.yml + tags: + - deploy-courses diff --git a/playbooks/roles/xqwatcher/tasks/deploy_courses.yml b/playbooks/roles/xqwatcher/tasks/deploy_courses.yml new file mode 100644 index 
00000000000..5fc741610a6 --- /dev/null +++ b/playbooks/roles/xqwatcher/tasks/deploy_courses.yml @@ -0,0 +1,15 @@ +# Iterates over the data structure documented in tasks/main.yml +# checking out the grader code from the repository specified on +# a per queue basis. + +- name: Checkout grader code + git: + repo: "{{ item.GIT_REPO }}" + dest: "{{ xqwatcher_app_dir }}/data/{{ item.COURSE }}" + version: "{{ item.GIT_REF }}" + accept_hostkey: yes + key_file: "{{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:code diff --git a/playbooks/roles/xqwatcher/tasks/deploy_watcher.yml b/playbooks/roles/xqwatcher/tasks/deploy_watcher.yml new file mode 100644 index 00000000000..e9a13e29113 --- /dev/null +++ b/playbooks/roles/xqwatcher/tasks/deploy_watcher.yml @@ -0,0 +1,70 @@ +# Installs the xqueue watcher code and supervisor scripts. +# The watcher can watch one or many queues and dispatch submissions +# to the appropriate grader which lives in a separate SCM repository. + +- name: Install application requirements + pip: + requirements: "{{ xqwatcher_requirements_file }}" + virtualenv: "{{ xqwatcher_app_dir }}/venvs/{{ xqwatcher_service_name }}" + virtualenv_python: "{{ xqwatcher_python_version }}" + state: present + become: true + become_user: "{{ xqwatcher_user }}" + tags: + - install + - install:app-requirements + +- name: "Install Datadog APM requirements" + when: COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP + pip: + name: + - ddtrace + extra_args: "--exists-action w" + virtualenv: "{{ xqwatcher_app_dir }}/venvs/{{ xqwatcher_service_name }}" + state: present + become_user: "{{ xqwatcher_user }}" + tags: + - install + - install:app-requirements + +- name: Write out course config files + template: + src: "edx/app/xqwatcher/conf.d/course.json.j2" + dest: "{{ xqwatcher_conf_dir }}/conf.d/{{ item.COURSE }}.json" + owner: "{{ xqwatcher_user }}" + group: "{{ xqwatcher_user }}" + mode: "0644" + with_items: "{{ XQWATCHER_COURSES }}" + tags: + - install + - install:configuration + +- name: Write supervisord config + template: + src: "edx/app/supervisor/conf.d/xqwatcher.conf.j2" + dest: "{{ supervisor_available_dir }}/xqwatcher.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: "0644" + tags: + - install + - install:configuration + +- name: Update supervisor configuration + command: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: Restart xqwatcher + supervisorctl: + name: "{{ xqwatcher_service_name }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + when: not disable_edx_services + become_user: "{{ common_web_user }}" + tags: + - manage + - manage:update diff --git a/playbooks/roles/xqwatcher/tasks/main.yml b/playbooks/roles/xqwatcher/tasks/main.yml new file mode 100644 index 00000000000..a755b2b66b5 --- /dev/null +++ b/playbooks/roles/xqwatcher/tasks/main.yml @@ -0,0 +1,117 @@ +--- +# +# edX Configuration +# +# github: https://github.com/openedx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/openedx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role xqwatcher +# +# Overview: +# +# This play installs a sandboxed, pull grader that integrates with XQueue. 
The +# code for the XQWatcher lives here: https://github.com/openedx/xqueue-watcher +# +# Multiple courses can be installed on a single server with distinct sandboxes. +# +# Example play: +# +# A play to install the XQWatcher would look like this: +# +# - name: Deploy xqueue-watcher +# hosts: all +# sudo: True +# gather_facts: True +# vars: +# COMMON_APP_DIR: "/edx/app" +# common_web_group: "www-data" +# roles: +# - aws +# - datadog +# - splunkforwarder +# - newrelic +# - xqwatcher +# +# You would use a command like the following to run the play. +# +# ansible-playbook -i ec2.py ./xqwatcher.yml -e@./example-config.yml +# +# The contents of the example-config.yml would include the queue +# metadata and details related to the repository, including the +# grader code. +# +# NOTA BENE: Apparmor does not follow symlinks +# +# XQWATCHER_COURSES: +# - COURSE: "exampleX-101x" +# GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git" +# GIT_REF: "master" +# PYTHON_EXECUTABLE: python2 +# PYTHON_REQUIREMENTS: [] +# QUEUE_NAME: "exampleX-101x" +# QUEUE_CONFIG: +# SERVER: "https://xqueue.example.com/" +# CONNECTIONS: 5 +# AUTH: ["user", "password"] +# HANDLERS: +# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader" +# CODEJAIL: +# name: "exampleX-101x" +# bin_path: "{{ xqwatcher_venv_base }}/exampleX-101x/bin/python" +# user: "exampleX-101x" +# lang: python2 +# KWARGS: +# grader_root: "../data/exampleX-101x/graders/" +# - COURSE: "exampleX-202x" +# GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git" +# GIT_REF: "master" +# PYTHON_EXECUTABLE: python3 +# PYTHON_REQUIREMENTS: [] +# QUEUE_NAME: "exampleX-202x" +# QUEUE_CONFIG: +# SERVER: "https://xqueue.example.com/" +# CONNECTIONS: 5 +# AUTH: ["user", "password"] +# HANDLERS: +# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader" +# CODEJAIL: +# name: "exampleX-202x" +# bin_path: "{{ xqwatcher_venv_base }}/exampleX-202x/bin/python" +# user: "exampleX-202x" +# lang: python3 +# KWARGS: +# grader_root: "../data/exampleX-202x/graders/" + +# XQWATCHER_GIT_IDENTITY: | +# -----BEGIN RSA PRIVATE KEY----- +# Your key if you need to access any private repositories +# -----END RSA PRIVATE KEY----- +# + +- name: Create conf dir + file: + path: "{{ xqwatcher_conf_dir }}" + state: directory + owner: "{{ xqwatcher_user }}" + group: "{{ xqwatcher_user }}" + tags: + - install + - install:base + +- name: Create conf.d dir + file: + path: "{{ xqwatcher_conf_dir }}/conf.d" + state: directory + owner: "{{ xqwatcher_user }}" + group: "{{ xqwatcher_user }}" + tags: + - install + - install:base + +- include: code_jail.yml CODE_JAIL_COMPLAIN=false + +- include: deploy.yml diff --git a/playbooks/roles/xqwatcher/templates/edx/app/supervisor/conf.d/xqwatcher.conf.j2 b/playbooks/roles/xqwatcher/templates/edx/app/supervisor/conf.d/xqwatcher.conf.j2 new file mode 100644 index 00000000000..47b8970d253 --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/edx/app/supervisor/conf.d/xqwatcher.conf.j2 @@ -0,0 +1,26 @@ +# +# {{ ansible_managed }} +# + +{% set xqwatcher_venv_dir = xqwatcher_app_dir + '/venvs/' + xqwatcher_service_name %} +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = xqwatcher_venv_dir + '/bin/newrelic-admin run-program ' + xqwatcher_venv_dir + '/bin/python' %} +{% else %} +{% set executable = xqwatcher_venv_dir + '/bin/python' %} +{% endif %} +{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %} +{% set executable = xqwatcher_venv_dir + '/bin/ddtrace-run ' + executable %} +{% endif -%} + +[program:{{ xqwatcher_service_name
}}] +command={{ executable }} -m {{ xqwatcher_module }} -d {{ xqwatcher_conf_dir }} +process_name=%(program_name)s +user={{ common_web_user }} +directory={{ xqwatcher_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log +# Copied DD_TRACE_LOG_STREAM_HANDLER config from edx_django_service. This is required +# to disable Datadog trace debug logging. +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ XQWATCHER_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}{% if COMMON_ENABLE_DATADOG and COMMON_ENABLE_DATADOG_APP %}DD_DJANGO_USE_LEGACY_RESOURCE_FORMAT=true,DD_TAGS=service:{{ XQWATCHER_DATADOG_APPNAME }},DD_TRACE_LOG_STREAM_HANDLER=false,{% endif -%} +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/conf.d/course.json.j2 b/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/conf.d/course.json.j2 new file mode 100644 index 00000000000..a45186a27ea --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/conf.d/course.json.j2 @@ -0,0 +1,4 @@ +{ + "{{ item.QUEUE_NAME }}": + {{ item.QUEUE_CONFIG | to_nice_json }} +} \ No newline at end of file diff --git a/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/data/requirements.txt.j2 b/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/data/requirements.txt.j2 new file mode 100644 index 00000000000..af69496a967 --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/data/requirements.txt.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% for requirement in item.PYTHON_REQUIREMENTS %} +{{ requirement.name }}=={{ requirement.version }} +{% endfor %} diff --git a/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/xqwatcher.json.j2 b/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/xqwatcher.json.j2 new file mode 100644 index 00000000000..46a9d357dbf --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/edx/app/xqwatcher/xqwatcher.json.j2 @@ -0,0 +1 @@ +{{ XQWATCHER_CONFIG | to_nice_json }} \ No newline at end of file diff --git a/playbooks/roles/xqwatcher/templates/etc/apparmor.d/code.jail.j2 b/playbooks/roles/xqwatcher/templates/etc/apparmor.d/code.jail.j2 new file mode 100644 index 00000000000..e71c860b581 --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/etc/apparmor.d/code.jail.j2 @@ -0,0 +1,30 @@ +#include + +{{ item.1.stdout }} { + #include + + {{ xqwatcher_app_dir }}/venvs/{{ item.0.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/** mr, + # TODO: need a way of providing the sandbox packages, e.g.:
+ # edxapp_code_dir /common/lib/sandbox-packages/** r, + /tmp/codejail-*/ rix, + /tmp/codejail-*/** wrix, + + # + # Whitelist particular shared objects from the system + # python installation + # + /usr/lib/python2.7/lib-dynload/_json.so mr, + /usr/lib/python2.7/lib-dynload/_ctypes.so mr, + /usr/lib/python2.7/lib-dynload/_heapq.so mr, + /usr/lib/python2.7/lib-dynload/_io.so mr, + /usr/lib/python2.7/lib-dynload/_csv.so mr, + /usr/lib/python2.7/lib-dynload/datetime.so mr, + /usr/lib/python2.7/lib-dynload/_elementtree.so mr, + /usr/lib/python2.7/lib-dynload/pyexpat.so mr, + /usr/lib/python2.7/lib-dynload/future_builtins.so mr, + # + # Allow access to selections from /proc + # + /proc/*/mounts r, + +} diff --git a/playbooks/roles/xqwatcher/templates/etc/sudoers.d/95-course-sandbox.j2 b/playbooks/roles/xqwatcher/templates/etc/sudoers.d/95-course-sandbox.j2 new file mode 100644 index 00000000000..08bbb29011c --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/etc/sudoers.d/95-course-sandbox.j2 @@ -0,0 +1,3 @@ +{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE.HANDLERS[0].CODEJAIL.name }}/bin/python +{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill +{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill diff --git a/playbooks/roles/xqwatcher/templates/etc/sudoers.d/95-xqwatcher.j2 b/playbooks/roles/xqwatcher/templates/etc/sudoers.d/95-xqwatcher.j2 new file mode 100644 index 00000000000..82f03335d85 --- /dev/null +++ b/playbooks/roles/xqwatcher/templates/etc/sudoers.d/95-xqwatcher.j2 @@ -0,0 +1,3 @@ +{{ common_web_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python +{{ common_web_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) NOPASSWD:/bin/kill +{{ common_web_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) NOPASSWD:/usr/bin/pkill diff --git a/playbooks/roles/xserver/defaults/main.yml b/playbooks/roles/xserver/defaults/main.yml deleted file mode 100644 index bc07dde7367..00000000000 --- a/playbooks/roles/xserver/defaults/main.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Variables for the xserver.
---- - -XSERVER_NGINX_PORT: 18050 - -XSERVER_RUN_URL: '' -XSERVER_LOGGING_ENV: 'sandbox' -XSERVER_SYSLOG_SERVER: '' -# For 6.00x use "{{ xserver_data_dir }}/data/content-mit-600x~2012_Fall" -XSERVER_GRADER_DIR: !!null -# For 6.00x use "git@github.com:/MITx/6.00x.git" -XSERVER_GRADER_SOURCE: !!null -# This must be set to run this role -# Example: "{{ secure_dir }}/files/git-identity" -XSERVER_LOCAL_GIT_IDENTITY: !!null - -XSERVER_LANG: "en_US.UTF-8" - -# Internal role vars below this line - -xserver_app_dir: "{{ COMMON_APP_DIR }}/xserver" -xserver_code_dir: "{{ xserver_app_dir }}/xserver" -xserver_data_dir: "{{ COMMON_DATA_DIR }}/xserver" -xserver_venvs_dir: "{{ xserver_app_dir }}/venvs" -xserver_venv_dir: "{{ xserver_venvs_dir }}/xserver" -xserver_venv_sandbox_dir: "{{ xserver_venv_dir }}-sandbox" -xserver_venv_bin: "{{ xserver_venv_dir }}/bin" -xserver_user: "xserver" -xserver_sandbox_user: "sandbox" -xserver_log_dir: "{{ COMMON_LOG_DIR }}/xserver" -xserver_grader_root: "{{ XSERVER_GRADER_DIR }}/graders" -xserver_git_identity: "{{ xserver_app_dir }}/{{ XSERVER_LOCAL_GIT_IDENTITY|basename }}" - -xserver_env_config: - RUN_URL: $XSERVER_RUN_URL - GRADER_ROOT: $xserver_grader_root - LOGGING_ENV: $XSERVER_LOGGING_ENV - LOG_DIR: "{{ xserver_log_dir }}" - SYSLOG_SERVER: $XSERVER_SYSLOG_SERVER - SANDBOX_PYTHON: '{{ xserver_venv_sandbox_dir }}/bin/python' - -xserver_source_repo: "git://github.com/edx/xserver.git" -# This should probably be overridden in the playbook or groupvars -# with the default pointing to the head of master. -xserver_version: master -xserver_grader_version: master - - -xserver_requirements_file: "{{ xserver_code_dir }}/requirements.txt" - -xserver_gunicorn_port: 8050 -xserver_gunicorn_host: 'localhost' -xserver_gunicorn_workers: "{{ ansible_processor|length }}" - -xserver_debian_pkgs: - - build-essential - - gcc - - gfortran - - liblapack-dev - - libxml++2.6-dev - - libxml2-dev - - libxml2-utils - - libxslt1-dev - - python-dev - - apparmor-utils diff --git a/playbooks/roles/xserver/handlers/main.yml b/playbooks/roles/xserver/handlers/main.yml deleted file mode 100644 index 51e50f4f727..00000000000 --- a/playbooks/roles/xserver/handlers/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://github.com/edx/configuration/wiki -# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role xserver -# -# Overview: -# - -- name: restart xserver - supervisorctl_local: > - name=xserver - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=restarted - diff --git a/playbooks/roles/xserver/meta/main.yml b/playbooks/roles/xserver/meta/main.yml deleted file mode 100644 index 107f1e98c29..00000000000 --- a/playbooks/roles/xserver/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - supervisor diff --git a/playbooks/roles/xserver/tasks/deploy.yml b/playbooks/roles/xserver/tasks/deploy.yml deleted file mode 100644 index 90e07a42f51..00000000000 --- a/playbooks/roles/xserver/tasks/deploy.yml +++ /dev/null @@ -1,86 +0,0 @@ -- name: "writing supervisor script" - template: > - src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf - owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 - -- name: checkout code - git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}} - sudo_user: "{{ xserver_user }}" - 
notify: restart xserver - -- name: install requirements - pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present - sudo_user: "{{ xserver_user }}" - notify: restart xserver - -- name: install sandbox requirements - pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present - sudo_user: "{{ xserver_user }}" - notify: restart xserver - -- name: create xserver application config - template: src=xserver.env.json.j2 dest={{ xserver_app_dir }}/env.json - sudo_user: "{{ xserver_user }}" - notify: restart xserver - -# Check to see if the identity file exists before copying. -# This extra check is done to prevent failures when -# ansible-playbook is run locally -- stat: path={{ XSERVER_LOCAL_GIT_IDENTITY }} - register: xserver_identity - -- name: install read-only ssh key for the content repo that is required for grading - copy: > - src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }} - owner={{ xserver_user }} group={{ xserver_user }} mode=0600 - notify: restart xserver - when: xserver_identity.stat.exists - -- name: upload ssh script - template: > - src=git_ssh.sh.j2 dest=/tmp/git_ssh.sh - owner={{ xserver_user }} mode=750 - notify: restart xserver - when: xserver_identity.stat.exists - -- name: checkout grader code - git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }} - environment: - GIT_SSH: /tmp/git_ssh.sh - notify: restart xserver - sudo_user: "{{ xserver_user }}" - when: xserver_identity.stat.exists - -- name: remove read-only ssh key for the content repo - file: path={{ xserver_git_identity }} state=absent - notify: restart xserver - when: xserver_identity.stat.exists - - # call supervisorctl update. this reloads - # the supervisorctl config and restarts - # the services if any of the configurations - # have changed. - # -- name: update supervisor configuration - shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" - register: supervisor_update - changed_when: supervisor_update.stdout != "" - -- name: ensure xserver is started - supervisorctl_local: > - name=xserver - supervisorctl_path={{ supervisor_ctl }} - config={{ supervisor_cfg }} - state=started - -- name: create a symlink for venv python - file: > - src="http://github.com/%7B%7B%20xserver_venv_bin%20%7D%7D/%7B%7B%20item%20%7D%7D" - dest={{ COMMON_BIN_DIR }}/{{ item }}.xserver - state=link - with_items: - - python - - pip - -- name: enforce app-armor rules - command: aa-enforce {{ xserver_venv_sandbox_dir }} diff --git a/playbooks/roles/xserver/tasks/main.yml b/playbooks/roles/xserver/tasks/main.yml deleted file mode 100644 index ef740c56ad7..00000000000 --- a/playbooks/roles/xserver/tasks/main.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Provision and bring up xserver -# As of right now this role requires -# access to the edX 6.00x repo which is not public ---- - -- name: checking for grader info - fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!" 
- when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE - -- name: checking for git identity - fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role" - when: not XSERVER_LOCAL_GIT_IDENTITY - -- name: create application user - user: > - name="{{ xserver_user }}" - home="{{ xserver_app_dir }}" - createhome=no - shell=/bin/false - -- name: create application sandbox user - user: > - name="{{ xserver_sandbox_user }}" - createhome=no - shell=/bin/false - -- name: create xserver app and data dirs - file: > - path="{{ item }}" - state=directory - owner="{{ xserver_user }}" - group="{{ common_web_group }}" - with_items: - - "{{ xserver_app_dir }}" - - "{{ xserver_venvs_dir }}" - - "{{ xserver_data_dir }}" - - "{{ xserver_data_dir }}/data" - -- name: create sandbox sudoers file - template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440 - -# Make sure this line is in the common-session file. -- name: ensure pam-limits module is loaded - lineinfile: - dest=/etc/pam.d/common-session - regexp="session required pam_limits.so" - line="session required pam_limits.so" - -- name: set sandbox limits - template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf - first_available_file: - - "{{ secure_dir }}/sandbox.conf.j2" - - "sandbox.conf.j2" - -- name: install system dependencies of xserver - apt: pkg={{ item }} state=present - with_items: xserver_debian_pkgs - -- name: load python-sandbox apparmor profile - template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox - first_available_file: - - "{{ secure_dir }}/files/edx_apparmor_sandbox.j2" - - "usr.bin.python-sandbox.j2" - -- include: deploy.yml tags=deploy diff --git a/playbooks/roles/xserver/templates/99-sandbox.j2 b/playbooks/roles/xserver/templates/99-sandbox.j2 deleted file mode 100644 index 4b069c49ce2..00000000000 --- a/playbooks/roles/xserver/templates/99-sandbox.j2 +++ /dev/null @@ -1 +0,0 @@ -www-data ALL=({{ xserver_sandbox_user }}) NOPASSWD:{{xserver_venv_sandbox_dir}}/bin/python diff --git a/playbooks/roles/xserver/templates/git_ssh.sh.j2 b/playbooks/roles/xserver/templates/git_ssh.sh.j2 deleted file mode 100644 index 542d626509c..00000000000 --- a/playbooks/roles/xserver/templates/git_ssh.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ xserver_git_identity }} "$@" diff --git a/playbooks/roles/xserver/templates/sandbox.conf.j2 b/playbooks/roles/xserver/templates/sandbox.conf.j2 deleted file mode 100644 index 51e5d235b00..00000000000 --- a/playbooks/roles/xserver/templates/sandbox.conf.j2 +++ /dev/null @@ -1,16 +0,0 @@ -{{ xserver_sandbox_user }} hard core 0 -{{ xserver_sandbox_user }} hard data 100000 -{{ xserver_sandbox_user }} hard fsize 10000 -{{ xserver_sandbox_user }} hard memlock 10000 -{{ xserver_sandbox_user }} hard nofile 20 -{{ xserver_sandbox_user }} hard rss 10000 -{{ xserver_sandbox_user }} hard stack 100000 -{{ xserver_sandbox_user }} hard cpu 0 -{{ xserver_sandbox_user }} hard nproc 8 -{{ xserver_sandbox_user }} hard as 32000 -{{ xserver_sandbox_user }} hard maxlogins 1 -{{ xserver_sandbox_user }} hard priority 19 -{{ xserver_sandbox_user }} hard locks 4 -{{ xserver_sandbox_user }} hard sigpending 100 -{{ xserver_sandbox_user }} hard msgqueue 100000 -{{ xserver_sandbox_user }} hard nice 19 diff --git a/playbooks/roles/xserver/templates/usr.bin.python-sandbox.j2 b/playbooks/roles/xserver/templates/usr.bin.python-sandbox.j2 deleted file mode 100644 index cc5a64d7c24..00000000000 --- 
a/playbooks/roles/xserver/templates/usr.bin.python-sandbox.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#include - -/usr/bin/python-sandbox { - #include - - /usr/bin/python-sandbox mr, - /usr/include/python2.7/** r, - /usr/local/lib/python2.7/** r, - /usr/lib/python2.7** rix, - - /tmp/** rix, -} - diff --git a/playbooks/roles/xserver/templates/xserver.conf.j2 b/playbooks/roles/xserver/templates/xserver.conf.j2 deleted file mode 100644 index 91426988cd7..00000000000 --- a/playbooks/roles/xserver/templates/xserver.conf.j2 +++ /dev/null @@ -1,13 +0,0 @@ -[program:xserver] - -command={{ xserver_venv_bin }}/gunicorn --preload -b {{ xserver_gunicorn_host }}:{{ xserver_gunicorn_port }} -w {{ xserver_gunicorn_workers }} --timeout=30 --pythonpath={{ xserver_code_dir }} pyxserver_wsgi:application - -user={{ common_web_user }} -directory={{ xserver_code_dir }} - -environment=PID=/var/tmp/xserver.pid,NEW_RELIC_CONFIG_FILE={{ xserver_app_dir }}/newrelic.ini,NEWRELIC={{ xserver_venv_dir }}/bin/newrelic-admin,PORT={{ xserver_gunicorn_port }},ADDRESS={{ xserver_gunicorn_host }},LANG={{ XSERVER_LANG }},DJANGO_SETTINGS_MODULE=xserver_aws_settings,SERVICE_VARIANT="xserver" - -stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log -killasgroup=true -stopasgroup=true diff --git a/playbooks/roles/xserver/templates/xserver.env.json.j2 b/playbooks/roles/xserver/templates/xserver.env.json.j2 deleted file mode 100644 index 345fa03d577..00000000000 --- a/playbooks/roles/xserver/templates/xserver.env.json.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ xserver_env_config | to_nice_json }} diff --git a/playbooks/run_role.yml b/playbooks/run_role.yml index 1f57a68af75..c63a3361b2b 100644 --- a/playbooks/run_role.yml +++ b/playbooks/run_role.yml @@ -1,10 +1,10 @@ --- -# Creates a new ansible role +# Runs an ansible role # Usage: # ansible-playbook ./run_role.yml -i "hostname," -e role=my_awesome_role # - hosts: all - sudo: True - gather_facts: False + become: True + gather_facts: True roles: - "{{role}}" diff --git a/playbooks/sample_vars/passwords.yml b/playbooks/sample_vars/passwords.yml new file mode 100644 index 00000000000..ba574030254 --- /dev/null +++ b/playbooks/sample_vars/passwords.yml @@ -0,0 +1,92 @@ +# This file is used to generate overridden, unique secrets +# for Open edX deployment environments +# +# The current process is described here: https://openedx.atlassian.net/wiki/x/dQArCQ +# + +ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD: !!null +ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE: !!null +COMMON_HTPASSWD_PASS: !!null +COMMON_HTPASSWD_USER: !!null +PROSPECTUS_PREVIEW_COMMON_HTPASSWD_PASS: !!null +PROSPECTUS_PREVIEW_COMMON_HTPASSWD_USER: !!null +COMMON_MONGO_READ_ONLY_PASS: !!null +COMMON_MYSQL_ADMIN_PASS: !!null +COMMON_MYSQL_MIGRATE_PASS: !!null +COMMON_MYSQL_READ_ONLY_PASS: !!null +COMMON_ANALYTICS_MYSQL_READ_ONLY_PASS: !!null +CREDENTIALS_MYSQL_PASSWORD: !!null +DISCOVERY_EMAIL_HOST_PASSWORD: !!null +DISCOVERY_MYSQL_PASSWORD: !!null +ECOMMERCE_BROKER_PASSWORD: '{{ REDIS_PASSWORD }}' +ECOMMERCE_DATABASE_PASSWORD: !!null +ECOMMERCE_WORKER_BROKER_PASSWORD: '{{ REDIS_PASSWORD }}' +EDXAPP_CELERY_PASSWORD: '{{ REDIS_PASSWORD }}' +EDXAPP_COMMENTS_SERVICE_KEY: '{{ FORUM_API_KEY }}' +EDXAPP_EMAIL_HOST_PASSWORD: !!null +EDXAPP_MYSQL_CSMH_PASSWORD: !!null +EDXAPP_MYSQL_PASSWORD: !!null +EDXAPP_MYSQL_PASSWORD_ADMIN: !!null +EDXAPP_MYSQL_PASSWORD_READ_ONLY: !!null + +EDXAPP_MYSQL_CSMH_USER: "edxapp_cmsh001" + +FLOWER_BROKER_PASSWORD: '{{
REDIS_PASSWORD }}' +FORUM_API_KEY: !!null +HIVE_METASTORE_DATABASE_PASSWORD: !!null +INSIGHTS_DATABASE_PASSWORD: !!null +INSIGHTS_EMAIL_HOST_PASSWORD: !!null +JENKINS_ANALYTICS_GITHUB_CREDENTIAL_PASSPHRASE: !!null +JENKINS_ANALYTICS_USER_PASSWORD_PLAIN: !!null +MARIADB_CLUSTER_PASSWORD_ADMIN: !!null +MONGO_ADMIN_PASSWORD: !!null +mongo_admin_password: '{{ MONGO_ADMIN_PASSWORD }}' +POSTFIX_QUEUE_EXTERNAL_SMTP_PASSWORD: !!null +REDIS_PASSWORD: !!null +SPLUNKFORWARDER_PASSWORD: !!null +SPLUNK_SMTP_PASSWORD: !!null +SPLUNK_SSL_PASSWORD: !!null +SWIFT_LOG_SYNC_PASSWORD: !!null +XQUEUE_BASIC_AUTH_PASSWORD: !!null +XQUEUE_BASIC_AUTH_USER: !!null +XQUEUE_MYSQL_PASSWORD: !!null +XQUEUE_RABBITMQ_PASS: !!null + +EDXAPP_MONGO_PASSWORD: !!null +EDXAPP_MONGO_USER: 'edxapp' + +FORUM_MONGO_USER: "cs_comments_service" +FORUM_MONGO_PASSWORD: !!null + +RABBIT_ADMIN_PASSWORD: !!null + +# Secret keys for Django Applications +ANALYTICS_API_SECRET_KEY: !!null #SECRET_KEY +CREDENTIALS_SECRET_KEY: !!null #SECRET_KEY +DISCOVERY_SECRET_KEY: !!null #SECRET_KEY +ECOMMERCE_SECRET_KEY: !!null #SECRET_KEY +EDX_NOTES_API_SECRET_KEY: !!null #SECRET_KEY +EDXAPP_EDXAPP_SECRET_KEY: !!null #SECRET_KEY +EDXAPP_PROFILE_IMAGE_HASH_SEED: !!null #SECRET_KEY +INSIGHTS_SECRET_KEY: !!null #SECRET_KEY + +MONGO_USERS: + - user: "{{ FORUM_MONGO_USER }}" + password: "{{ FORUM_MONGO_PASSWORD }}" + database: cs_comments_service + roles: readWrite + - user: "{{ EDXAPP_MONGO_USER }}" + password: "{{ EDXAPP_MONGO_PASSWORD }}" + database: edxapp + roles: readWrite + +# JWT-related settings +COMMON_JWT_AUDIENCE: !!null +COMMON_JWT_SECRET_KEY: !!null #SECRET_KEY +ECOMMERCE_WORKER_JWT_SECRET_KEY: !!null #SECRET_KEY + +FERNET_KEYS: +- !!null #SECRET_KEY +EDXAPP_RETIRED_USER_SALTS: +- !!null +COMMON_XQUEUE_LMS_PASSWORD: !!null diff --git a/playbooks/sample_vars/server_vars.yml b/playbooks/sample_vars/server_vars.yml new file mode 100644 index 00000000000..7e99adf960b --- /dev/null +++ b/playbooks/sample_vars/server_vars.yml @@ -0,0 +1,180 @@ +--- + +#EDXAPP_PREVIEW_LMS_BASE: preview-${deploy_host} +#EDXAPP_LMS_BASE: ${deploy_host} +#EDXAPP_CMS_BASE: studio-${deploy_host} +#EDXAPP_SITE_NAME: ${deploy_host} +#edx_internal: True +#COMMON_USER_INFO: +# - name: ${github_username} +# github: true +# type: admin +#USER_CMD_PROMPT: '[$name_tag] ' +#COMMON_ENABLE_NEWRELIC_APP: $enable_newrelic +#COMMON_ENABLE_DATADOG: $enable_datadog +#FORUM_NEW_RELIC_ENABLE: $enable_newrelic +#ENABLE_PERFORMANCE_COURSE: $performance_course +#ENABLE_EDX_DEMO_COURSE: $edx_demo_course +#EDXAPP_NEWRELIC_LMS_APPNAME: sandbox-${dns_name}-edxapp-lms +#EDXAPP_NEWRELIC_CMS_APPNAME: sandbox-${dns_name}-edxapp-cms +#EDXAPP_NEWRELIC_WORKERS_APPNAME: sandbox-${dns_name}-edxapp-workers +#XQUEUE_NEWRELIC_APPNAME: sandbox-${dns_name}-xqueue +#FORUM_NEW_RELIC_APP_NAME: sandbox-${dns_name}-forums +#SANDBOX_USERNAME: $github_username +#EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT: "https://ecommerce-${deploy_host}/" +#EDXAPP_ECOMMERCE_API_URL: "https://ecommerce-${deploy_host}/api/v2" +# +#ECOMMERCE_ECOMMERCE_URL_ROOT: "https://ecommerce-${deploy_host}/" +#ECOMMERCE_LMS_URL_ROOT: "https://${deploy_host}/" +#ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true +# +#CREDENTIALS_LMS_URL_ROOT: "https://${deploy_host}/" +#CREDENTIALS_URL_ROOT: "https://credentials-${deploy_host}/" +#CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true +#COURSE_DISCOVERY_ECOMMERCE_API_URL: "https://ecommerce-${deploy_host}/api/v2" +# +#DISCOVERY_URL_ROOT: "https://discovery-${deploy_host}/" +#DISCOVERY_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true + +## These flags are used to toggle role installation +## in the plays that install each server cluster +#COMMON_NEWRELIC_LICENSE: '' +#COMMON_AWS_SYNC: True +#NEWRELIC_LICENSE_KEY: '' +#NEWRELIC_LOGWATCH: [] + # - logwatch-cms-errors.j2 + # - logwatch-lms-errors.j2 +#COMMON_ENABLE_NEWRELIC: True + +## Datadog Settings +#datadog_api_key: "" +#COMMON_DATADOG_API_KEY: "" +#DATADOG_API_KEY: "" + +## NGINX settings: +#NGINX_ENABLE_SSL: True +#NGINX_SSL_CERTIFICATE: '/path/to/ssl.crt' +#NGINX_SSL_KEY: '/path/to/ssl.key' +#NGINX_SERVER_ERROR_IMG: https://files.edx.org/images-public/edx-sad-small.png +#EDXAPP_XBLOCK_FS_STORAGE_BUCKET: 'your-xblock-storage-bucket' +#EDXAPP_XBLOCK_FS_STORAGE_PREFIX: 'sandbox-edx/' +#EDXAPP_LMS_SSL_NGINX_PORT: 443 +#EDXAPP_CMS_SSL_NGINX_PORT: 443 +#EDXAPP_LMS_NGINX_PORT: 80 +#EDXAPP_CMS_NGINX_PORT: 80 +#EDXAPP_WORKERS: +# lms: 2 +# cms: 2 +#migrate_db: "yes" +#rabbitmq_ip: "127.0.0.1" +#rabbitmq_refresh: True +#COMMON_HOSTNAME: edx-server +#COMMON_DEPLOYMENT: edx +#COMMON_ENVIRONMENT: sandbox +#AS_WORKERS: 1 +#ANALYTICS_WORKERS: 1 +#ANALYTICS_API_GUNICORN_WORKERS: 1 +## Settings for Grade downloads +#EDXAPP_GRADE_STORAGE_TYPE: 's3' +#EDXAPP_GRADE_BUCKET: 'your-grade-bucket' +#EDXAPP_GRADE_ROOT_PATH: 'sandbox' +#EDXAPP_SEGMENT_IO: 'true' +#EDXAPP_SEGMENT_IO_LMS: 'true' +#EDXAPP_SEGMENT_IO_KEY: 'your segment.io key' +#EDXAPP_SEGMENT_IO_LMS_KEY: 'your segment.io key' +#EDXAPP_YOUTUBE_API_KEY: "Your Youtube API Key" +# +#EDXAPP_FEATURES: +# ENABLE_DISCUSSION_SERVICE: true +# ENABLE_DISCUSSION_HOME_PANEL: true +# SUBDOMAIN_BRANDING: false +# SUBDOMAIN_COURSE_LISTINGS: false +# PREVIEW_LMS_BASE: "{{ EDXAPP_PREVIEW_LMS_BASE }}" +# ENABLE_GRADE_DOWNLOADS: true +# ENABLE_MKTG_SITE: "{{ EDXAPP_ENABLE_MKTG_SITE }}" +# ENABLE_PUBLISHER: "{{ EDXAPP_ENABLE_PUBLISHER }}" +# AUTOMATIC_AUTH_FOR_TESTING: "{{ EDXAPP_ENABLE_AUTO_AUTH }}" +# ENABLE_THIRD_PARTY_AUTH: "{{ EDXAPP_ENABLE_THIRD_PARTY_AUTH }}" +# AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING: true +# ENABLE_PAYMENT_FAKE: true +# ENABLE_VIDEO_UPLOAD_PIPELINE: true +# SEPARATE_VERIFICATION_FROM_PAYMENT: true +# ENABLE_COMBINED_LOGIN_REGISTRATION: true +# ENABLE_CORS_HEADERS: true +# ENABLE_MOBILE_REST_API: true +# ENABLE_OAUTH2_PROVIDER: true +# LICENSING: true +# CERTIFICATES_HTML_VIEW: true +# CUSTOM_COURSES_EDX: true +# ENABLE_SYSADMIN_DASHBOARD: true +# +#EDXAPP_CORS_ORIGIN_WHITELIST: +# - "example.org" +# - "www.example.org" +# - "{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}" +# +#EDXAPP_LOGIN_REDIRECT_WHITELIST: +# - "lms.example.org" +# - "studio.example.org" +# - "insights.example.org" +# +#EDXAPP_VIDEO_UPLOAD_PIPELINE: +# BUCKET: "your-video-bucket" +# ROOT_PATH: "edx-video-upload-pipeline/unprocessed" +# +#EDXAPP_PROFILE_IMAGE_BACKEND: +# class: storages.backends.s3boto3.S3Boto3Storage +# options: +# location: /{{ ansible_ec2_public_ipv4 }} +# bucket: your-profile-image-bucket +# custom_domain: yourcloudfrontdomain.cloudfront.net +# headers: +# Cache-Control: max-age-{{ EDXAPP_PROFILE_IMAGE_MAX_AGE }} +#EDXAPP_PROFILE_IMAGE_HASH_SEED: "SECRET KEY HERE" +# +##TODO: remove once ansible_provision.sh stops sucking or is burned to the ground +#EDXAPP_PROFILE_IMAGE_MAX_AGE: 31536000 +# +## send logs to s3 +#AWS_S3_LOGS: true +#AWS_S3_LOGS_NOTIFY_EMAIL: SRE+logs@example.com +#AWS_S3_LOGS_FROM_EMAIL: sre@example.com +#EDX_ANSIBLE_DUMP_VARS: true +#CONFIGURATION_VERSION: release +#EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: true
diff --git a/playbooks/sample_vars/test-mongo.yml b/playbooks/sample_vars/test-mongo.yml new file mode 100644 index 00000000000..5649ed55b6f --- /dev/null +++ b/playbooks/sample_vars/test-mongo.yml @@ -0,0 +1,90 @@ +# Example ansible commands +# Three node replica set +# ansible-playbook -i '203.0.113.12,203.0.113.20,203.0.113.68' -u ubuntu mongo_3_2.yml -e@sample_vars/test-mongo.yml +# Single node +# ansible-playbook -i '203.0.113.12' -u ubuntu mongo_3_2.yml -e@sample_vars/test-mongo.yml + +# Passwords and replication keys in this file are examples and must be changed.
+# You must change any variable with the string "CHANGEME" in it + +MONGO_HEARTBEAT_TIMEOUT_SECS: 3 +EDXAPP_MONGO_HOSTS: "{{ MONGO_RS_CONFIG.members|map(attribute='host')|list }}" + +MONGO_VOLUMES: + - device: /dev/xvdb + mount: /edx/var/mongo + options: "defaults,noatime" + fstype: ext4 + - device: /dev/xvdc + mount: /edx/var/mongo/mongodb/journal + options: "defaults,noatime" + fstype: ext4 + +##### edx-secure/ansible/vars/stage-edx.yml ##### +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'CHANGEME_794jtB7zLIvDjHGu2gD6wKUU' +MONGO_MONITOR_USER: 'cloud-manager' +MONGO_MONITOR_PASSWORD: 'CHANGEME_7DJ9FTWHJx4TCSPxSmx1k3DD' +MONGO_BACKUP_USER: 'backup' +MONGO_BACKUP_PASSWORD: 'CHANGEME_XbJA3LouKV5QDv2NQixnOrQj' +MONGO_REPL_SET: 'test-repl-set' +MONGO_RS_CONFIG: + _id: '{{ MONGO_REPL_SET }}' + members: +# Must use private IPs here, mongo role assumes internal ips when checking if node is in this list + - host: '203.0.113.12' + - host: '203.0.113.20' + - host: '203.0.113.68' + +MONGO_CLUSTER_KEY: | + CHANGEME/CHANGE/ME/CHANGE/ME9YeSrVDYxont1rDh2nBAEGB30PhwG9ghtPY + c1QUc2etVfMnE9vbUhLimU/Xb4j4yLRDurOTi8eYoE8eAvAquLalcz7URMuw8Qt3 + fIyFa3wSXyE04rpsoBrpG53HwwFrN3pra3x4YPs8g77v50V56gfwaStNJ3KPpa5w + RukdFXnCUPRyONSJEYwjPzI2WucnAZqlDYre6qjxL+6hCjZ4vS/RPgfoHGTUQ62W + 9k2TiWar/c1nL6rZvGhGJHFmZalyL9pJ4SAaYoFPhCmcHusyzjlM8p27AsyJwDyr + kSI/JPBLMLDoiLUAPHGz1jrGM+iOgTilmfPVy+0UVc9Bf2H4Vs1zKJpUM2RNAPJ7 + S9DzB6q8WtRothbEtwnppWojceid202uLEYCpqhCcH6LR0lTcyJiXCRyHAtue813 + 5Djv1m3Z8p2z6B+3ab7CDq+WV9OrBI7+eynnwYGgp4eIHQNNSb1/x/8TeiVMQYyJ + ONj4PbgVwsdhL+RUuVqCzjK0F4B4FOSSKXbu07L4F/PALqVugH/YebAUAJVo027r + ca669FSrQ8q6Jgx3M1mCoZkp23CVt3B28+EwpyABh6cwxIrTIvxU6cvxX8M2piz+ + 63nKUKoStNhmRA0EGfbY9WRmk1RNlC2jVJAvvJUnNXnouNF2DGV4pRNGlb7yfS+n + S+3ZZpUDpTLx36CWGPJ1ZpwuZ0p5JPbCSW6gpFZqGFZsQERg6L8Q9FkwESnbfw+V + oDiVJlClJA2AFXMnAt9q1dhM7OVBj12x9YI5yf1Lw0vVLb7JDmWI7IGaibyxtjFi + jO4bAEl4RZu3364nFH/nVf6kV2S29pAREMqxbcR5O75OuHFN9cqG7BhYClg+5mWg + mGKLLgpXsJxd6bMGjxH1uc30E2qbU1mkrW29Ocl5DFuXevK2dxVj71ZiYESIUg87 + KRdC8S3Mljym9ruu4nDC3Sk4xLLuUGp/yD2O0B0dZTfYOJdt + +COMMON_MONGO_READ_ONLY_USER: 'read_only' +COMMON_MONGO_READ_ONLY_PASS: "CHANGEME correct horse battery staple" + +EDXAPP_MONGO_PASSWORD: 'CHANGEME_H8uoZEZJun9BeR5u8mMyA4yh' +EDXAPP_MONGO_USER: 'edxapp003' + +FORUM_MONGO_USER: "comments001" +FORUM_MONGO_PASSWORD: "CHANGEME_j5fhX0pOwEL1S5WUFZkbZAyZ" + +login_host: "{{ EDXAPP_MONGO_HOSTS[1] }}" +repl_set: "{{ EDXAPP_MONGO_REPLICA_SET }}" +MONGO_USERS: + - user: "{{ EDXAPP_MONGO_USER }}" + password: "{{ EDXAPP_MONGO_PASSWORD }}" + database: "{{ EDXAPP_MONGO_DB_NAME }}" + roles: readWrite + - user: "{{ COMMON_MONGO_READ_ONLY_USER }}" + password: "{{ COMMON_MONGO_READ_ONLY_PASS }}" + database: "{{ EDXAPP_MONGO_DB_NAME }}" + roles: + - { db: "{{ EDXAPP_MONGO_DB_NAME }}", role: "read" } + - { db: "admin", role: "clusterMonitor" } + - user: "{{ MONGO_MONITOR_USER }}" + password: "{{ MONGO_MONITOR_PASSWORD }}" + database: "admin" + roles: clusterMonitor + - user: "{{ MONGO_BACKUP_USER }}" + password: "{{ MONGO_BACKUP_PASSWORD }}" + database: "admin" + roles: backup + +EDXAPP_MONGO_DB_NAME: 'test-mongo-db' +EDXAPP_MONGO_PORT: 27017 +EDXAPP_MONGO_REPLICA_SET: '{{ MONGO_REPL_SET }}' diff --git a/playbooks/secure_example/files/git-identity b/playbooks/secure_example/files/git-identity deleted file mode 100644 index 00f0519625f..00000000000 --- a/playbooks/secure_example/files/git-identity +++ /dev/null @@ -1 +0,0 @@ -IDENTITY FILE FOR GIT diff --git a/playbooks/secure_example/keys/frank.key 
b/playbooks/secure_example/keys/frank.key deleted file mode 100644 index fb7c53ab4c3..00000000000 --- a/playbooks/secure_example/keys/frank.key +++ /dev/null @@ -1,2 +0,0 @@ -ssh-rsa ASFDG frank@somehost -ssh-rsa GHJKL frank@anotherhost diff --git a/playbooks/secure_example/keys/joe.key b/playbooks/secure_example/keys/joe.key deleted file mode 100644 index 67d926d290f..00000000000 --- a/playbooks/secure_example/keys/joe.key +++ /dev/null @@ -1,2 +0,0 @@ -ssh-rsa ASFDG joe@somehost -ssh-rsa GHJKL joe@notherhost diff --git a/playbooks/secure_example/vars/README.md b/playbooks/secure_example/vars/README.md deleted file mode 100644 index 0e278ba52bb..00000000000 --- a/playbooks/secure_example/vars/README.md +++ /dev/null @@ -1 +0,0 @@ -This is an example secure/ data which would normally have passwords and sensitive bits diff --git a/playbooks/secure_example/vars/edx_jenkins_tests.yml b/playbooks/secure_example/vars/edx_jenkins_tests.yml deleted file mode 100644 index a1e39138a97..00000000000 --- a/playbooks/secure_example/vars/edx_jenkins_tests.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# override the default virtualenv for ora -ora_venv_dir: "/opt/wwc/virtualenvs/ora" -# ease and ora share the same virtualenv -ease_venv_dir: "/opt/wwc/virtualenvs/ora" diff --git a/playbooks/secure_example/vars/edxapp_ref_custom_vars.yml b/playbooks/secure_example/vars/edxapp_ref_custom_vars.yml deleted file mode 100644 index 899e370d827..00000000000 --- a/playbooks/secure_example/vars/edxapp_ref_custom_vars.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -lms_auth_config: - 'DATABASES': - 'default': { 'ENGINE': 'custom', - 'HOST': 'custom', 'NAME': 'custom', - 'PASSWORD': 'custom', 'PORT': 0000, - 'USER': 'custom'} diff --git a/playbooks/secure_example/vars/edxapp_ref_users.yml b/playbooks/secure_example/vars/edxapp_ref_users.yml deleted file mode 100644 index 40cea8d05e5..00000000000 --- a/playbooks/secure_example/vars/edxapp_ref_users.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# these user lists cannot be merged -# because they are not hashes - -env_users: [] - -env_keys: [] - -# administrator accounts, added to all roles -# The create_users role task automatically adds all these users to the -# 'adm' and 'edx' system groups -admin_users: -- user: joe - email: joe@example.com - groups: - # But at least one group must be defined - - adm - -admin_keys: -- user: joe - path: "{{ secure_dir }}/keys/joe.key" diff --git a/playbooks/secure_example/vars/edxapp_ref_vars.yml b/playbooks/secure_example/vars/edxapp_ref_vars.yml deleted file mode 100644 index 39c0c4b9426..00000000000 --- a/playbooks/secure_example/vars/edxapp_ref_vars.yml +++ /dev/null @@ -1,211 +0,0 @@ ---- -#Use YAML references (& and *) and hash merge <<: to factor out shared settings -#see http://atechie.net/2009/07/merging-hashes-in-yaml-conf-files/ -lms_auth_config: &lms_auth - 'ANALYTICS_API_KEY': 'hidden-prod' - 'AWS_ACCESS_KEY_ID': 'hidden-prod' - 'AWS_SECRET_ACCESS_KEY': 'hidden-prod' - 'CONTENTSTORE': - 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore' - 'OPTIONS': - 'db': 'hidden-prod' - 'host': [ 'hidden-prod', 'hidden-prod'] - 'password': 'hidden-prod' - 'port': 0000 - 'user': 'hidden-prod' - 'DATABASES': - 'default': { 'ENGINE': 'hidden-prod', - 'HOST': 'hidden-prod', 'NAME': 'hidden-prod', - 'PASSWORD': 'hidden-prod', 'PORT': 0000, - 'USER': 'hidden-prod'} - 'MODULESTORE': - 'default': - 'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore' - 'OPTIONS': &lms_modulestore_default_options - 'collection': 'hidden-prod' - 'db': 'hidden-prod' - 
'default_class': 'hidden-prod' - 'fs_root': 'hidden-prod' - 'host': [ 'hidden-prod', 'hidden-prod'] - 'password': 'hidden-prod' - 'port': 0000 - 'render_template': 'hidden-prod' - 'user': 'hidden-prod' - 'OPEN_ENDED_GRADING_INTERFACE': { 'hidden-prod': 'hidden-prod', - 'password': 'hidden-prod', 'hidden-prod': 'hidden-prod', - 'staff_grading': 'hidden-prod', 'hidden-prod': 'hidden-prod', - 'username': 'hidden-prod'} - 'PEARSON_TEST_PASSWORD': 'hidden-prod' - 'SECRET_KEY': 'hidden-prod' - 'XQUEUE_INTERFACE': - 'basic_auth': [ 'hidden-prod', 'hidden-prod'] - 'django_auth': { 'password': 'hidden-prod', - 'username': 'hidden-prod'} - 'url': 'hidden-prod' - -lms_env_config: &lms_env - 'CERT_QUEUE': 'certificates' -# 'COURSE_LISTINGS': -# 'default': ['MITx/6.002x/2012_Fall'] -# 'stage-berkeley': [ 'BerkeleyX/CS169/fa12'] -# 'stage-harvard': [ 'HarvardX/CS50/2012H'] -# 'stage-mit': [ 'MITx/3.091/MIT_2012_Fall'] -# 'stage-num': [ 'MITx/6.002x-NUM/2012_Fall_NUM'] -# 'stage-sjsu': [ 'MITx/6.002x-EE98/2012_Fall_SJSU'] - 'LOCAL_LOGLEVEL': 'INFO' -# 'META_UNIVERSITIES': -# 'UTx': [ 'UTAustinX'] - 'MITX_FEATURES': - 'AUTH_USE_OPENID_PROVIDER': true - 'CERTIFICATES_ENABLED': true - 'ENABLE_DISCUSSION_SERVICE': true - 'ENABLE_INSTRUCTOR_ANALYTICS': true - 'ENABLE_PEARSON_HACK_TEST': false - 'SUBDOMAIN_BRANDING': false - 'SUBDOMAIN_COURSE_LISTINGS': false -# 'SUBDOMAIN_BRANDING': -# 'stage-berkeley': 'BerkeleyX' -# 'stage-harvard': 'HarvardX' -# 'stage-mit': 'MITx' -# 'stage-num': 'MITx' -# 'stage-sjsu': 'MITx' -# 'VIRTUAL_UNIVERSITIES': [] - 'WIKI_ENABLED': true - 'SYSLOG_SERVER': 'hidden-prod' - 'SITE_NAME': 'hidden-prod' - 'LOG_DIR': 'hidden-prod' - 'MEDIA_URL': 'hidden-prod' - 'BOOK_URL': 'hidden-prod' - 'ANALYTICS_SERVER_URL': 'hidden-prod' - 'DEFAULT_FROM_EMAIL': 'hidden-stage' - 'DEFAULT_FEEDBACK_EMAIL': 'hidden-stage' - 'ADMINS' : - - ['name', 'email'] - 'TIME_ZONE': 'America/New_York' - 'CACHES': &lms_caches - 'default': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'KEY_PREFIX': 'hidden-prod' - 'LOCATION': [ 'hidden-prod', - 'hidden-prod'] - 'general': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'KEY_PREFIX': 'hidden-prod' - 'LOCATION': [ 'hidden-prod', - 'hidden-prod'] - 'mongo_metadata_inheritance': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'TIMEOUT': 300 - 'KEY_PREFIX': 'hidden-prod' - 'LOCATION': [ 'hidden-prod', - 'hidden-prod'] - 'staticfiles': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'KEY_PREFIX': 'hidden-prod' - 'LOCATION': [ 'hidden-prod', - 'hidden-prod'] - 'COMMENTS_SERVICE_URL': 'hidden-prod' - 'LOGGING_ENV': 'hidden-prod' - 'SESSION_COOKIE_DOMAIN': 'hidden-prod' - 'COMMENTS_SERVICE_KEY': 'hidden-prod' - -cms_auth_config: - 'AWS_ACCESS_KEY_ID': 'hidden-prod' - 'AWS_SECRET_ACCESS_KEY': 'hidden-prod' - 'CONTENTSTORE': - 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore' - 'OPTIONS': - 'db': 'hidden-prod' - 'host': [ 'hidden-prod', 'hidden-prod'] - 'password': 'hidden-prod' - 'port': 0000 - 'user': 'hidden-prod' - 'DATABASES': - 'default': { 'ENGINE': 'hidden-prod', - 'HOST': 'hidden-prod', 'NAME': 'hidden-prod', - 'PASSWORD': 'hidden-prod', 'PORT': 0000, - 'USER': 'hidden-prod'} - 'MODULESTORE': - 'default': - 'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore' - 'OPTIONS': - 'collection': 'hidden-prod' 
- 'db': 'hidden-prod' - 'default_class': 'hidden-prod' - 'fs_root': 'hidden-prod' - 'host': [ 'hidden-prod', 'hidden-prod'] - 'password': 'hidden-prod' - 'port': 0000 - 'render_template': 'hidden-prod' - 'user': 'hidden-prod' - 'direct': - 'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore' - 'OPTIONS': - 'collection': 'hidden-prod' - 'db': 'hidden-prod' - 'default_class': 'hidden-prod' - 'fs_root': 'hidden-prod' - 'host': [ 'hidden-prod', 'hidden-prod'] - 'password': 'hidden-prod' - 'port': 0000 - 'render_template': 'hidden-prod' - 'user': 'hidden-prod' - 'SECRET_KEY': 'hidden-prod' - -cms_env_config: - 'CACHES': - 'default': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'KEY_PREFIX': 'cms.edx.org' - 'LOCATION': [ "deploycache-large.foo-bar.amazonaws.com:11211" ] - 'mongo_metadata_inheritance': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'TIMEOUT': 300 - 'KEY_PREFIX': 'cms.edx.org' - 'LOCATION': [ "deploycache-large.foo-bar.amazonaws.com:11211" ] - 'staticfiles': - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'KEY_PREFIX': 'cms.edx.org' - 'LOCATION': [ "deploycache-large.foo-bar.amazonaws.com:11211" ] - 'general': - 'KEY_PREFIX': 'hidden-prod' - 'LOCATION': [ 'hidden-prod', - 'hidden-prod'] - 'LOG_DIR': '{{ COMMON_LOG_DIR }}/edx' - 'LOGGING_ENV': 'cms-dev' - 'SITE_NAME': 'studio.cms-dev.m.edx.org' - 'SYSLOG_SERVER': 'syslog.a.m.i4x.org' - 'LMS_BASE': 'cms-dev.m.edx.org' - 'SESSION_COOKIE_DOMAIN': '.cms-dev.m.edx.org' - 'SEGMENT_IO_KEY': 'hidden-prod' - 'MITX_FEATURES': - 'DISABLE_COURSE_CREATION': false - 'SEGMENT_IO': false - -lms_preview_auth_config: - <<: *lms_auth - 'MODULESTORE': - 'default': - 'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore' - 'OPTIONS': *lms_modulestore_default_options - -lms_preview_env_config: - <<: *lms_env - 'SITE_NAME': 'preview.class.stanford.edu' - 'COMMENTS_SERVICE_KEY': false - 'CACHES': - <<: *lms_caches - 'general': - 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache' - 'KEY_PREFIX': 'preview.edx.org' - 'KEY_FUNCTION': 'util.memcache.safe_key' - 'LOCATION': [ 'vpc-974dbeff-cache.oyg26r.0001.usw1.cache.amazonaws.com:12345', - 'vpc-974dbeff-cache.oyg26r.0002.usw1.cache.amazonaws.com:12345' ] - diff --git a/playbooks/secure_example/vars/gerrit.yml b/playbooks/secure_example/vars/gerrit.yml deleted file mode 100644 index 7be03ffd68a..00000000000 --- a/playbooks/secure_example/vars/gerrit.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -gerrit_github_client_id: alskdjdfkjasdjfsdlfkj -gerrit_github_client_secret: 0938908450deffaaa87665a555a6fc6de5777f77f -gerrit_db_hostname: somedb.88374jhyehf.us-east-1.rds.amazonaws.com -gerrit_db_admin_username: adminuser -gerrit_db_admin_password: adminpassword -gerrit_db_password: gerrituserpassword -gerrit_artifact_s3_bucket: - name: some-s3-bucket - aws_access_key_id: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}" - aws_secret_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}" -gerrit_hostname: "gerrit.example.com" -gerrit_smtp_enabled: false -gerrit_email: gerrit@example.com -gerrit_smtp_server: smtp.example.com -gerrit_smtp_encryption: none -gerrit_smtp_user: someuser -gerrit_smtp_pass: somepassword diff --git a/playbooks/security.sh b/playbooks/security.sh new file mode 100644 index 00000000000..c96994936b5 --- /dev/null +++ b/playbooks/security.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +set -ex + +# 
https://alas.aws.amazon.com/ALAS-2015-473.html + +check_vulnerability() { + cat > glibc_check.c << EOF +#include <netdb.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#define CANARY "in_the_coal_mine" + +struct { + char buffer[1024]; + char canary[sizeof(CANARY)]; +} temp = { "buffer", CANARY }; + +int main(void) { + struct hostent resbuf; + struct hostent *result; + int herrno; + int retval; + + /*** strlen (name) = size_needed - sizeof (*host_addr) - sizeof (*h_addr_ptrs) - 1; ***/ + size_t len = sizeof(temp.buffer) - 16*sizeof(unsigned char) - 2*sizeof(char *) - 1; + char name[sizeof(temp.buffer)]; + memset(name, '0', len); + name[len] = '\0'; + + retval = gethostbyname_r(name, &resbuf, temp.buffer, sizeof(temp.buffer), &result, &herrno); + + if (strcmp(temp.canary, CANARY) != 0) { + puts("vulnerable"); + exit(EXIT_SUCCESS); + } + if (retval == ERANGE) { + puts("not vulnerable"); + exit(EXIT_SUCCESS); + } + puts("should not happen"); + exit(EXIT_FAILURE); +} +/* from http://www.openwall.com/lists/oss-security/2015/01/27/9 */ EOF + gcc glibc_check.c -o glibc_check + ./glibc_check +} + +upgrade_packages() { + sudo apt-get clean + sudo mv /etc/apt/sources.list /tmp/sources.list.bk + sudo sh -c 'echo "deb http://http.us.debian.org/debian wheezy main contrib non-free" >> /etc/apt/sources.list' + sudo sh -c 'echo "deb http://security.debian.org wheezy/updates main contrib non-free" >> /etc/apt/sources.list' + sudo apt-get update -y + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes --only-upgrade libgcc1 bash + sudo mv /tmp/sources.list.bk /etc/apt/sources.list + sudo apt-get clean + sudo /etc/init.d/ssh restart + [ "$(check_vulnerability)" == "not vulnerable" ] +} + +upgrade_packages diff --git a/playbooks/security.yml b/playbooks/security.yml new file mode 100644 index 00000000000..54e149c2367 --- /dev/null +++ b/playbooks/security.yml @@ -0,0 +1,5 @@ +- name: Apply security role + hosts: all + become: True + roles: + - security diff --git a/playbooks/set_hostname.yml b/playbooks/set_hostname.yml new file mode 100644 index 00000000000..465026fccea --- /dev/null +++ b/playbooks/set_hostname.yml @@ -0,0 +1,19 @@ +# This is a utility play to set a hostname +# on a server + +- name: Set hostname + hosts: all + become: True + gather_facts: False + tasks: + - name: "Ensure we have a hostname" + fail: msg="you must pass a hostname_fqdn var into this play" + when: hostname_fqdn is not defined + - name: Set hostname + hostname: name={{ hostname_fqdn.split('.')[0] }} + - name: Update /etc/hosts + lineinfile: + dest: /etc/hosts + regexp: "^127\\.0\\.1\\.1" + line: "127.0.1.1{{ '\t' }}{{ hostname_fqdn.split('.')[0] }}{{ '\t' }}{{ hostname_fqdn }}{{ '\t' }}localhost" + state: present diff --git a/playbooks/snort.yml b/playbooks/snort.yml new file mode 100644 index 00000000000..e2b75b32974 --- /dev/null +++ b/playbooks/snort.yml @@ -0,0 +1,18 @@ +- name: Deploy snort IDS + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - snort + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG +
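security.sh above writes out and compiles the canonical GHOST (CVE-2015-0235) canary test, then asserts the patched glibc reports "not vulnerable". A small illustrative wrapper (hypothetical, not part of this diff) showing the same compile-and-check flow driven from Python:

```python
# Hypothetical wrapper around the glibc_check.c canary from security.sh:
# compile it with gcc, run it, and return True if glibc is still vulnerable.
import subprocess

def ghost_vulnerable(source="glibc_check.c", binary="./glibc_check"):
    subprocess.run(["gcc", source, "-o", binary], check=True)
    verdict = subprocess.run([binary], capture_output=True, text=True)
    return verdict.stdout.strip() == "vulnerable"

if __name__ == "__main__":
    print("vulnerable" if ghost_vulnerable() else "not vulnerable")
```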
diff --git a/playbooks/splunk.yml b/playbooks/splunk.yml new file mode 100644 index 00000000000..7555bf3aba7 --- /dev/null +++ b/playbooks/splunk.yml @@ -0,0 +1,9 @@ +# This is commented out since it should not be run normally. +# # Usage: ansible-playbook splunk.yml -e@/path/to/environment-deployment.yml + +# - name: Deploy Splunk +# hosts: all +# become: True +# gather_facts: True +# roles: +# - splunk-server diff --git a/playbooks/splunk_config_backup.yml b/playbooks/splunk_config_backup.yml new file mode 100644 index 00000000000..0345b13792c --- /dev/null +++ b/playbooks/splunk_config_backup.yml @@ -0,0 +1,19 @@ +- name: Backup splunk configurations + hosts: all + vars: + splunk_config_dir: "/opt/splunk/etc" + splunk_host: "{{ splunk_host_id }}" + splunk_config_archive: "{{ splunk_host }}-{{ date }}.tar.gz" + splunk_s3_backup_tempdir: "{{ splunk_backup_dir }}" + splunk_s3_bucket: "{{ splunk_s3_backups_bucket }}" + tasks: + - set_fact: + date: "{{ lookup('pipe', 'date +%Y-%m-%dT%H%M') }}" + - name: archive splunk configuration dir + command: sudo tar -cpzf "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" {{ splunk_config_dir }} + register: tar_result + failed_when: tar_result.rc > 1 + - name: copy tarball to s3 bucket + command: sudo aws s3 cp "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" s3://{{ splunk_s3_bucket }} + - name: cleanup backup file + shell: sudo rm "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" diff --git a/playbooks/splunkforwarder.yml b/playbooks/splunkforwarder.yml new file mode 100644 index 00000000000..b5b5c43e558 --- /dev/null +++ b/playbooks/splunkforwarder.yml @@ -0,0 +1,6 @@ +- name: Install Splunk Forwarder + hosts: all + become: True + gather_facts: True + roles: + - splunkforwarder diff --git a/playbooks/stop_all_edx_services.yml b/playbooks/stop_all_edx_services.yml new file mode 100644 index 00000000000..c0a8c1050b1 --- /dev/null +++ b/playbooks/stop_all_edx_services.yml @@ -0,0 +1,9 @@ +- name: Stop all services + hosts: all + become: True + gather_facts: False + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - stop_all_edx_services diff --git a/playbooks/tableau.yml b/playbooks/tableau.yml new file mode 100644 index 00000000000..bb828367e03 --- /dev/null +++ b/playbooks/tableau.yml @@ -0,0 +1,10 @@ +- name: Deploy tableau + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + CLUSTER_NAME: 'tableau' + serial: "{{ serial_count }}" + roles: + - tableau diff --git a/playbooks/tableau_de.yml b/playbooks/tableau_de.yml new file mode 100644 index 00000000000..1b293a816f5 --- /dev/null +++ b/playbooks/tableau_de.yml @@ -0,0 +1,11 @@ +- name: Deploy tableau + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + CLUSTER_NAME: 'tableau' + serial: "{{ serial_count }}" + roles: + - tableau + - tableau_de diff --git a/playbooks/tanaguru.yml b/playbooks/tanaguru.yml new file mode 100644 index 00000000000..2855a650150 --- /dev/null +++ b/playbooks/tanaguru.yml @@ -0,0 +1,9 @@ +- name: Deploy Tanaguru + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mysql + - tanaguru
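The splunk_config_backup play above tars /opt/splunk/etc, pushes the archive to S3, and removes the local copy. A rough Python equivalent (illustrative only; the host id, temp directory, and bucket are assumed inputs, and like the play it tolerates tar exit code 1 for files that change mid-read):

```python
# Illustrative equivalent of splunk_config_backup.yml's three tasks.
import datetime
import pathlib
import subprocess

def backup_splunk_config(host_id, tempdir, bucket, config_dir="/opt/splunk/etc"):
    stamp = datetime.datetime.now().strftime("%Y-%m-%dT%H%M")
    archive = pathlib.Path(tempdir) / f"{host_id}-{stamp}.tar.gz"
    # tar exits 1 if a file changed while being read; the play allows rc <= 1.
    rc = subprocess.run(["tar", "-cpzf", str(archive), config_dir]).returncode
    if rc > 1:
        raise RuntimeError(f"tar failed with exit code {rc}")
    subprocess.run(["aws", "s3", "cp", str(archive), f"s3://{bucket}"], check=True)
    archive.unlink()  # cleanup backup file
```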
diff --git a/playbooks/test-rolling.yml b/playbooks/test-rolling.yml new file mode 100644 index 00000000000..a07f2a285fc --- /dev/null +++ b/playbooks/test-rolling.yml @@ -0,0 +1,41 @@ +- hosts: all + become: True + vars: + # By default take instances in and out of the elb(s) they + # are attached to + # To skip elb operations use "-e elb_pre_post=false" + elb_pre_post: true + # Number of instances to operate on at a time + serial_count: 1 + serial: "{{ serial_count }}" + pre_tasks: + - action: ec2_metadata_facts + when: elb_pre_post + - debug: + var: "{{ ansible_ec2_instance_id }}" + when: elb_pre_post + - name: Instance De-register + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + region: us-east-1 + state: absent + wait_timeout: 60 + become: False + when: elb_pre_post + tasks: + - shell: echo "test" + post_tasks: + - debug: + var: "{{ ansible_ec2_instance_id }}" + when: elb_pre_post + - name: Register instance in the elb + local_action: ec2_elb + args: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ ec2_elbs }}" + region: us-east-1 + state: present + wait_timeout: 60 + become: False + when: elb_pre_post diff --git a/playbooks/testcourses.yml b/playbooks/testcourses.yml new file mode 100644 index 00000000000..b1fe67c13e8 --- /dev/null +++ b/playbooks/testcourses.yml @@ -0,0 +1,16 @@ +- name: Create courses for testing + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - testcourses + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + diff --git a/playbooks/tools-gp.yml b/playbooks/tools-gp.yml new file mode 100644 index 00000000000..d3338a6b93d --- /dev/null +++ b/playbooks/tools-gp.yml @@ -0,0 +1,18 @@ +# Creating a new ad hoc reporting box: +# ansible-playbook -i 'reporting.example.com,' ./tools-gp.yml -e@/var/path/deployment.yml -e@/vars/path/environment-deployment.yml -e@/vars/path/ad_hoc_reporting_replica_db_hosts.yml +# Updating the users on an existing box: +# ansible-playbook -i 'reporting.example.com,' ./tools-gp.yml --tags users -e@/var/path/users.yml -e@/vars/path/environment-deployment.yml +- name: Deploy Ad Hoc Reporting Scripts + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - ad_hoc_reporting + - ghost + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER
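test-rolling.yml above brackets its tasks with ELB de-registration and re-registration (via the ec2_elb module) so instances are rotated one at a time. For reference, a rough boto3 sketch of the same cycle (illustrative only; the deleted elb_reg.py utility later in this diff did the equivalent with the older boto library):

```python
# Illustrative boto3 version of the deregister/register cycle that
# test-rolling.yml performs with the ec2_elb module (classic ELB API).
import boto3

def set_elb_registration(instance_id, elb_names, register, region="us-east-1"):
    elb = boto3.client("elb", region_name=region)
    instances = [{"InstanceId": instance_id}]
    for name in elb_names:
        if register:
            elb.register_instances_with_load_balancer(
                LoadBalancerName=name, Instances=instances)
            waiter = elb.get_waiter("instance_in_service")
        else:
            elb.deregister_instances_from_load_balancer(
                LoadBalancerName=name, Instances=instances)
            waiter = elb.get_waiter("instance_deregistered")
        # Block until the ELB reports the awaited state, as the play's
        # wait_timeout and elb_reg.py's polling loop both do.
        waiter.wait(LoadBalancerName=name, Instances=instances)
```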
diff --git a/playbooks/tools_jenkins.yml b/playbooks/tools_jenkins.yml new file mode 100644 index 00000000000..57848d95a7d --- /dev/null +++ b/playbooks/tools_jenkins.yml @@ -0,0 +1,39 @@ +# Documentation on updating tools-edx-jenkins: https://openedx.atlassian.net/wiki/display/EdxOps/Updating+tools-edx-jenkins + +# Updating or creating a new install of tools_jenkins (will restart Jenkins) +# ansible-playbook -i tools-edx-jenkins.m.edx.org, tools_jenkins.yml -e@/path/to/secure-config/tools-edx.yml + +# Update tools_jenkins with new plugins (will not restart Jenkins): +# ansible-playbook -i tools-edx-jenkins.m.edx.org, tools_jenkins.yml -e@/path/to/secure-config/tools-edx.yml --tags install:plugins + +# Configure an instance with the tool jenkins. +- name: Configure Jenkins instance(s) + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + # jenkins_master role does extra tweaks to datadog if COMMON_ENABLE_DATADOG is set + # so this needs to run early. + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - tools_jenkins + # This requires an override of the following form: + # SPLUNKFORWARDER_LOG_ITEMS: + # - source: /edx/var/jenkins/jobs/*/builds/*/log + # index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-jenkins' + # sourcetype: jenkins_build + # followSymlink: false + # crcSalt: + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/users.yml b/playbooks/users.yml new file mode 100644 index 00000000000..7f7e05180a5 --- /dev/null +++ b/playbooks/users.yml @@ -0,0 +1,12 @@ +# Simple playbook for creating/updating/removing users on a box +# If you run it against a box with automated users and don't pass them in, it will break them +# ansible-playbook -i 'host.example.com,' ./users.yml -e@/var/path/users.yml -e@/vars/path/environment-deployment.yml +- name: Update users + hosts: all + become: True + gather_facts: True + roles: + - role: user + user_info: "{{ COMMON_USER_INFO }}" + tags: + - users diff --git a/playbooks/util/elb_reg.py b/playbooks/util/elb_reg.py deleted file mode 100755 index 317b7f9299a..00000000000 --- a/playbooks/util/elb_reg.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -from argparse import ArgumentParser -import time -import boto - - -def await_elb_instance_state(lb, instance_id, awaited_state): - """blocks until the ELB reports awaited_state - for instance_id. - lb = loadbalancer object - instance_id : instance_id (string) - awaited_state : state to poll for (string)""" - - start_time = time.time() - while True: - state = lb.get_instance_health([instance_id])[0].state - if state == awaited_state: - print "Load Balancer {lb} is in awaited state " - "{awaited_state}, proceeding.".format( - lb=lb.dns_name, - awaited_state=awaited_state) - break - else: - print "Checking again in 2 seconds.
Elapsed time: {0}".format( - time.time() - start_time) - time.sleep(2) - - -def deregister(): - """Deregister the instance from all ELBs and wait for the ELB - to report them out-of-service""" - - for lb in active_lbs: - lb.deregister_instances([args.instance]) - await_elb_instance_state(lb, args.instance, 'OutOfService') - - -def register(): - """Register the instance for all ELBs and wait for the ELB - to report them in-service""" - for lb in active_lbs: - lb.register_instances([args.instance]) - await_elb_instance_state(lb, args.instance, 'InService') - - -def parse_args(): - parser = ArgumentParser() - subparsers = parser.add_subparsers(dest="sp_action") - subparsers.add_parser('register', help='register an instance') - subparsers.add_parser('deregister', help='deregister an instance') - - parser.add_argument('-e', '--elbs', required=True, - help="Comma separated list of ELB names") - parser.add_argument('-i', '--instance', required=True, - help="Single instance to operate on") - return parser.parse_args() - -if __name__ == '__main__': - args = parse_args() - - elb = boto.connect_elb() - elbs = elb.get_all_load_balancers() - active_lbs = sorted( - lb - for lb in elbs - if lb.name in args.elbs.split(',')) - - print "ELB : " + str(args.elbs.split(',')) - print "Instance: " + str(args.instance) - if args.sp_action == 'deregister': - print "Deregistering an instance" - deregister() - elif args.sp_action == 'register': - print "Registering an instance" - register() diff --git a/playbooks/util/github_oauth_token.py b/playbooks/util/github_oauth_token.py deleted file mode 100755 index 56c91ddb663..00000000000 --- a/playbooks/util/github_oauth_token.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python - -""" -Generate a GitHub OAuth token with a particular -set of permissions. - -Usage: - - github_oauth_token.py USERNAME PASSWORD [SCOPE ...] - -Example: - - github_oauth_token.py jenkins_user repo:status public_repo - -This will prompt the user for the password. -""" - -import sys -import requests -import json -import getpass -from textwrap import dedent - -USAGE = "Usage: {0} USERNAME NOTE [SCOPE ...]" - - -def parse_args(arg_list): - """ - Return a dict of the command line arguments. - Prints an error message and exits if the arguments are invalid. - """ - if len(arg_list) < 4: - print USAGE.format(arg_list[0]) - exit(1) - - # Prompt for the password - password = getpass.getpass() - - return { - 'username': arg_list[1], - 'password': password, - 'note': arg_list[2], - 'scopes': arg_list[3:], - } - - -def get_oauth_token(username, password, scopes, note): - """ - Create a GitHub OAuth token with the given scopes. - If unsuccessful, print an error message and exit. - - Returns a tuple `(token, scopes)` - """ - params = {'scopes': scopes, 'note': note} - - response = response = requests.post( - 'https://api.github.com/authorizations', - data=json.dumps(params), - auth=(username, password) - ) - - if response.status_code != 201: - print dedent(""" - Could not create OAuth token. - HTTP status code: {0} - Content: {1} - """.format(response.status_code, response.text)).strip() - exit(1) - - try: - token_data = response.json() - return token_data['token'], token_data['scopes'] - - except TypeError: - print "Could not parse response data." - exit(1) - - except KeyError: - print "Could not retrieve data from response." 
- exit(1) - - -def main(): - arg_dict = parse_args(sys.argv) - token, scopes = get_oauth_token( - arg_dict['username'], arg_dict['password'], - arg_dict['scopes'], arg_dict['note'] - ) - - print "Token: {0}".format(token) - print "Scopes: {0}".format(", ".join(scopes)) - - -if __name__ == "__main__": - main() diff --git a/playbooks/vagrant-analytics.yml b/playbooks/vagrant-analytics.yml new file mode 100644 index 00000000000..8fb1a9eb496 --- /dev/null +++ b/playbooks/vagrant-analytics.yml @@ -0,0 +1,44 @@ +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + migrate_db: 'yes' + devstack: true + disable_edx_services: true + mongo_enable_journal: false + EDXAPP_NO_PREREQ_INSTALL: 0 + COMMON_SSH_PASSWORD_AUTH: "yes" + EDXAPP_LMS_BASE: 127.0.0.1:8000 + EDXAPP_OAUTH_ENFORCE_SECURE: false + EDXAPP_LMS_BASE_SCHEME: http + ECOMMERCE_DJANGO_SETTINGS_MODULE: "ecommerce.settings.devstack" + # When provisioning your devstack, we apply security updates + COMMON_SECURITY_UPDATES: true + SECURITY_UPGRADE_ON_ANSIBLE: true + MONGO_AUTH: false + roles: + - common + - vhost + - edx_ansible + - mysql + - edxlocal + - memcache + - mongo + - role: rabbitmq + rabbitmq_ip: 127.0.0.1 + - edxapp + - oraclejdk + - elasticsearch + - forum + - ecommerce + - role: ecomworker + ECOMMERCE_WORKER_BROKER_HOST: 127.0.0.1 + # The following two are added, relative to devstack + - analytics_api + - insights + # Leaving out browsers, relative to devstack. + - local_dev + - demo + - analytics_pipeline + - oauth_client_setup diff --git a/playbooks/vagrant-cluster.yml b/playbooks/vagrant-cluster.yml new file mode 100644 index 00000000000..c50b1797138 --- /dev/null +++ b/playbooks/vagrant-cluster.yml @@ -0,0 +1,51 @@ +- name: Configure group cluster + hosts: all + become: True + gather_facts: True + vars: + vagrant_cluster: yes + mongo_cluster_members: + - "cluster1" + - "cluster2" + - "cluster3" + MONGO_CLUSTER_KEY: 'password' + ELASTICSEARCH_CLUSTERED: yes + MARIADB_CLUSTERED: yes + MARIADB_CREATE_DBS: no + roles: + - user + - mongo + - oraclejdk + - elasticsearch + - mariadb + - edx_ansible + +# Rabbit needs to be built serially +- name: Configure group cluster serial roles + hosts: all + become: True + serial: 1 + gather_facts: True + vars: + RABBITMQ_CLUSTERED_HOSTS: + - "rabbit@cluster1" + - "rabbit@cluster2" + - "rabbit@cluster3" + rabbitmq_ip: "" + roles: + - rabbitmq + +# There are race conditions creating DBs +# in MariaDB occasionally so this play will work +# but will also show as failed +- name: Configure group with tasks that will always fail + hosts: all + become: True + gather_facts: True + vars: + MARIADB_CLUSTERED: yes + MARIADB_CREATE_DBS: yes + vars_files: + - "roles/analytics_api/defaults/main.yml" + roles: + - mariadb diff --git a/playbooks/vagrant-devstack.yml b/playbooks/vagrant-devstack.yml deleted file mode 100644 index e5ae69ead44..00000000000 --- a/playbooks/vagrant-devstack.yml +++ /dev/null @@ -1,26 +0,0 @@ -- name: Configure instance(s) - hosts: all - sudo: True - gather_facts: True - vars: - migrate_db: "yes" - openid_workaround: True - devstack: True - edx_platform_version: 'master' - mongo_enable_journal: False - EDXAPP_NO_PREREQ_INSTALL: 0 - COMMON_MOTD_TEMPLATE: "devstack_motd.tail.j2" - vars_files: - - "group_vars/all" - roles: - - edx_ansible - - edxlocal - - mongo - - edxapp - - oraclejdk - - elasticsearch - - forum - - ora - - browsers - - local_dev - - demo diff --git a/playbooks/vagrant-fullstack.yml b/playbooks/vagrant-fullstack.yml deleted file mode 
100644 index 1da9d78b6b0..00000000000 --- a/playbooks/vagrant-fullstack.yml +++ /dev/null @@ -1,36 +0,0 @@ -- name: Configure instance(s) - hosts: all - sudo: True - gather_facts: True - vars: - migrate_db: "yes" - openid_workaround: True - EDXAPP_LMS_NGINX_PORT: '80' - edx_platform_version: 'master' - vars_files: - - "group_vars/all" - roles: - - edx_ansible - - gh_users - - role: nginx - nginx_sites: - - cms - - lms - - ora - - forum - - xqueue - nginx_default_sites: - - lms - - cms - - edxlocal - - mongo - - { role: 'edxapp', celery_worker: True } - - edxapp - - demo - - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - - oraclejdk - - elasticsearch - - forum - - { role: "xqueue", update_users: True } - - ora - - edx_ansible diff --git a/playbooks/vagrant/inventory.ini b/playbooks/vagrant/inventory.ini deleted file mode 100644 index 2d480ab6601..00000000000 --- a/playbooks/vagrant/inventory.ini +++ /dev/null @@ -1,2 +0,0 @@ -[vagrant] -192.168.33.10 diff --git a/playbooks/vpc_admin.yml b/playbooks/vpc_admin.yml new file mode 100644 index 00000000000..be7360d32ad --- /dev/null +++ b/playbooks/vpc_admin.yml @@ -0,0 +1,20 @@ +# ansible-playbook -vvv -c ssh -i admin_url, vpc_admin.yml -e "@path_to_common_overrides" -e "@path_to_deployment_specific_overrides" +# Configure an admin instance with jenkins and asgard. +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - edx_ansible + - user + - jenkins_admin + - hotg + - alton + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/worker.yml b/playbooks/worker.yml new file mode 100644 index 00000000000..ba7f5636739 --- /dev/null +++ b/playbooks/worker.yml @@ -0,0 +1,24 @@ +- name: Deploy worker + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: edxapp + celery_worker: True + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: minos + when: COMMON_ENABLE_MINOS + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: jwt_signature + when: CONFIGURE_JWTS + app_name: lms + app_config_file: "{{ COMMON_CFG_DIR }}/lms.yml" + app_config_owner: "{{ edxapp_user }}" + app_config_group: "{{ common_web_group }}" + app_config_mode: 0640 diff --git a/playbooks/xqueue.yml b/playbooks/xqueue.yml new file mode 100644 index 00000000000..4e23fcb06b2 --- /dev/null +++ b/playbooks/xqueue.yml @@ -0,0 +1,23 @@ +- name: Deploy xqueue + hosts: all + become: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: automated + AUTOMATED_USERS: "{{ XQUEUE_AUTOMATED_USERS | default({}) }}" + - role: nginx + nginx_sites: + - xqueue + - xqueue + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'xqueue' + when: XQUEUE_HERMES_ENABLED diff --git a/playbooks/xqwatcher.yml b/playbooks/xqwatcher.yml new file mode 100644 index 00000000000..8798dda77cc --- /dev/null +++ b/playbooks/xqwatcher.yml @@ -0,0 +1,21 @@ +- name: Deploy xqueue-watcher + hosts: all + become: True + gather_facts: True + vars: + COMMON_APP_DIR: "/edx/app" + common_web_group: "www-data" + 
ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - xqwatcher + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/requirements.txt b/requirements.txt index 38febd4cea6..5293f81318b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,116 @@ -ansible==1.4.4 -PyYAML==3.10 -Jinja2==2.7.2 -MarkupSafe==0.18 -argparse==1.2.1 -boto==2.20.1 -ecdsa==0.10 -paramiko==1.12.0 -pycrypto==2.6.1 -wsgiref==0.1.2 -GitPython==0.3.2.RC1 -pymongo==2.4.1 -requests==2.2.1 +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +ansible==2.10.7 + # via -r requirements/base.in +ansible-base==2.10.17 + # via ansible +awscli==1.32.2 + # via -r requirements/base.in +bcrypt==3.1.7 + # via + # -c requirements/constraints.txt + # paramiko +boto==2.49.0 + # via -r requirements/base.in +boto3==1.34.2 + # via -r requirements/base.in +botocore==1.34.2 + # via + # awscli + # boto3 + # s3transfer +certifi==2023.11.17 + # via requests +cffi==1.16.0 + # via + # bcrypt + # cryptography + # pynacl +charset-normalizer==3.3.2 + # via requests +colorama==0.4.4 + # via awscli +cryptography==41.0.7 + # via + # ansible-base + # paramiko +datadog==0.8.0 + # via -r requirements/base.in +decorator==5.1.1 + # via + # datadog + # networkx +docopt==0.6.2 + # via -r requirements/base.in +docutils==0.16 + # via awscli +ecdsa==0.13.3 + # via -r requirements/base.in +idna==3.6 + # via requests +jinja2==2.8 + # via + # -r requirements/base.in + # ansible-base +jmespath==1.0.1 + # via + # boto3 + # botocore +markupsafe==2.0.1 + # via + # -r requirements/base.in + # jinja2 +mysqlclient==1.4.6 + # via -r requirements/base.in +networkx==1.11 + # via -r requirements/base.in +packaging==23.2 + # via ansible-base +paramiko==2.4.2 + # via -r requirements/base.in +pathlib2==2.3.0 + # via -r requirements/base.in +prettytable==0.7.2 + # via -r requirements/base.in +pyasn1==0.5.1 + # via + # paramiko + # rsa +pycparser==2.21 + # via cffi +pycryptodome==3.19.0 + # via -r requirements/base.in +pymongo==3.9.0 + # via -r requirements/base.in +pynacl==1.5.0 + # via paramiko +python-dateutil==2.8.2 + # via botocore +pyyaml==6.0.1 + # via + # -r requirements/base.in + # ansible-base + # awscli +requests==2.31.0 + # via + # -r requirements/base.in + # datadog +rsa==4.7.2 + # via awscli +s3transfer==0.9.0 + # via + # awscli + # boto3 +six==1.16.0 + # via + # bcrypt + # pathlib2 + # python-dateutil +urllib3==1.26.18 + # via + # botocore + # requests diff --git a/requirements/asym-crypto-yaml.txt b/requirements/asym-crypto-yaml.txt new file mode 100644 index 00000000000..8eb9ae77f1e --- /dev/null +++ b/requirements/asym-crypto-yaml.txt @@ -0,0 +1 @@ +asym-crypto-yaml diff --git a/requirements/aws.in b/requirements/aws.in new file mode 100644 index 00000000000..f76449ccdef --- /dev/null +++ b/requirements/aws.in @@ -0,0 +1,8 @@ +# Python dependencies for the aws role + +awscli +boto +boto3 +futures ; python_version == "2.7" # via s3transfer +s3cmd +pyyaml==5.3.1 diff --git a/requirements/base.in b/requirements/base.in new file mode 100644 index 00000000000..e38281a4e19 --- /dev/null +++ b/requirements/base.in @@ -0,0 +1,21 @@ +-c constraints.txt +# Standard dependencies for Ansible runs + +ansible<3.0.0 +awscli +boto +boto3 +datadog==0.8.0 +docopt==0.6.2 
+ecdsa==0.13.3 +Jinja2==2.8 +markupsafe==2.0.1 # Pinning this until we upgrade jinja2; newer versions of markupsafe remove soft_unicode, which jinja2==2.8 still uses +mysqlclient==1.4.6 # Needed for the mysql_db module; 1.4.6 is the newest version that supports python 2, which we really need to stop using +networkx==1.11 +paramiko==2.4.2 +pathlib2==2.3.0 +prettytable==0.7.2 +pycryptodome +pymongo==3.9.0 # Needed for the mongo_* modules (playbooks/library/mongo_*) +PyYAML +requests diff --git a/requirements/cloudflare.in b/requirements/cloudflare.in new file mode 100644 index 00000000000..4704073c2c8 --- /dev/null +++ b/requirements/cloudflare.in @@ -0,0 +1,4 @@ +# Needed for CloudFlare cache hit rate job in util/jenkins + +requests +click diff --git a/requirements/common_constraints.txt b/requirements/common_constraints.txt new file mode 100644 index 00000000000..15aafb293da --- /dev/null +++ b/requirements/common_constraints.txt @@ -0,0 +1,23 @@ +# A central location for most common version constraints +# (across edx repos) for pip-installation. +# +# Similar to other constraint files this file doesn't install any packages. +# It specifies version constraints that will be applied if a package is needed. +# When pinning something here, please provide an explanation of why it is a good +# idea to pin this package across all edx repos. Ideally, link to other information +# that will help people in the future to remove the pin when possible. +# Writing an issue against the offending project and linking to it here is good. +# +# Note: Changes to this file will automatically be used by other repos, referencing +# this file from Github directly. It does not require packaging in edx-lint. + + +# using LTS django version +Django<4.0 + +# elasticsearch>=7.14.0 includes breaking changes which caused issues in the discovery upgrade process.
+# elastic search changelog: https://www.elastic.co/guide/en/enterprise-search/master/release-notes-7.14.0.html +elasticsearch<7.14.0 + +# django-simple-history>3.0.0 adds indexing and causes a lot of migrations to be affected +django-simple-history==3.0.0 diff --git a/requirements/constraints.txt b/requirements/constraints.txt new file mode 100644 index 00000000000..aaa989ce567 --- /dev/null +++ b/requirements/constraints.txt @@ -0,0 +1,3 @@ +-c common_constraints.txt + +bcrypt<3.2.0 # 3.2.0 dropped support for python 2.7 diff --git a/requirements/elasticsearch.in b/requirements/elasticsearch.in new file mode 100644 index 00000000000..53cbf87db87 --- /dev/null +++ b/requirements/elasticsearch.in @@ -0,0 +1,4 @@ +# Requirements for util/elasticsearch/verify-index-copy.py + +deepdiff==3.1.0 +elasticsearch==0.4.5 diff --git a/requirements/jenkins.in b/requirements/jenkins.in new file mode 100644 index 00000000000..0f5728b084b --- /dev/null +++ b/requirements/jenkins.in @@ -0,0 +1,18 @@ +# Python dependencies for the util/jenkins/requirements.txt + +awscli +boto +boto3 +futures ; python_version == "2.7" # via s3transfer +s3cmd +pyyaml +backoff==1.4.3 +celery +click +opsgenie-sdk==0.3.1 +PyMySQL==0.9.3 +python-gnupg +redis==2.10.6 +splunk-sdk==1.6.16 # older versions have been yanked +yq +jq diff --git a/requirements/pingdom.in b/requirements/pingdom.in new file mode 100644 index 00000000000..4112c810fa6 --- /dev/null +++ b/requirements/pingdom.in @@ -0,0 +1,6 @@ +# Requirements for util/pingdom/create_pingdom_alerts.py + +click==6.7 +PyYAML +requests +six==1.14.0 diff --git a/requirements/pip-tools.in b/requirements/pip-tools.in new file mode 100644 index 00000000000..3f1b64ae937 --- /dev/null +++ b/requirements/pip-tools.in @@ -0,0 +1,4 @@ +# Just the dependencies to run pip-tools, mainly for the "upgrade" make target +-c constraints.txt + +pip-tools # Contains pip-compile, used to generate pip requirements files diff --git a/requirements/pip-tools.txt b/requirements/pip-tools.txt new file mode 100644 index 00000000000..93a9cee28cb --- /dev/null +++ b/requirements/pip-tools.txt @@ -0,0 +1,31 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +build==1.0.3 + # via pip-tools +click==8.1.7 + # via pip-tools +importlib-metadata==7.0.0 + # via build +packaging==23.2 + # via build +pip-tools==7.3.0 + # via -r requirements/pip-tools.in +pyproject-hooks==1.0.0 + # via build +tomli==2.0.1 + # via + # build + # pip-tools + # pyproject-hooks +wheel==0.42.0 + # via pip-tools +zipp==3.17.0 + # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# pip +# setuptools diff --git a/requirements/pip.in b/requirements/pip.in new file mode 100644 index 00000000000..715478cdc0c --- /dev/null +++ b/requirements/pip.in @@ -0,0 +1,7 @@ +-c constraints.txt +# Core dependencies for installing other packages + +pip +setuptools +wheel + diff --git a/requirements/pip.txt b/requirements/pip.txt new file mode 100644 index 00000000000..d798b87b36c --- /dev/null +++ b/requirements/pip.txt @@ -0,0 +1,14 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +wheel==0.42.0 + # via -r requirements/pip.in + +# The following packages are considered to be unsafe in a requirements file: +pip==23.3.2 + # via -r requirements/pip.in +setuptools==69.0.2 + # via -r requirements/pip.in diff --git a/requirements/vpc-tools.in b/requirements/vpc-tools.in new 
file mode 100644 index 00000000000..c95980083d1 --- /dev/null +++ b/requirements/vpc-tools.in @@ -0,0 +1,5 @@ +# Requirements for the scripts in util/vpc-tools + +boto +docopt +requests diff --git a/requirements3.txt b/requirements3.txt new file mode 100644 index 00000000000..3fca4f7088e --- /dev/null +++ b/requirements3.txt @@ -0,0 +1,3 @@ +# File not removed in order to maintain backwards compatibility +# Use requirements.txt instead +-r requirements.txt diff --git a/test.mk b/test.mk new file mode 100644 index 00000000000..c9bdc85d077 --- /dev/null +++ b/test.mk @@ -0,0 +1,36 @@ +yml_files:=$(shell find . -name "*.yml") +json_files:=$(shell find . -name "*.json") +# $(images) is calculated in the docker.mk file + +help: test.help + +test.help: + @echo ' Tests:' + @echo ' test.syntax Run all syntax tests' + @echo ' test.syntax.json Run syntax tests on .json files' + @echo ' test.syntax.yml Run syntax tests on .yml files' + @echo ' test.syntax.jinja Run syntax tests on .j2 files' + @echo ' test.playbooks Run validation on playbooks' + @echo '' + +test: test.syntax test.playbooks + +test.syntax: test.syntax.yml test.syntax.json + +test.syntax.yml: $(patsubst %,test.syntax.yml/%,$(yml_files)) + +test.syntax.yml/%: + python -c "import sys,yaml; yaml.safe_load(open(sys.argv[1]))" $* >/dev/null + +test.syntax.json: $(patsubst %,test.syntax.json/%,$(json_files)) + +test.syntax.json/%: + jsonlint -v $* + +test.playbooks: + tests/test_playbooks.sh + +clean: test.clean + +test.clean: + rm -rf playbooks/test_output diff --git a/tests/test_mongodb_replica_set.py b/tests/test_mongodb_replica_set.py new file mode 100644 index 00000000000..f7bf3e53ecf --- /dev/null +++ b/tests/test_mongodb_replica_set.py @@ -0,0 +1,166 @@ +# Tests for mongodb_replica_set ansible module +# +# How to run these tests: +# 1. move this file to playbooks/library +# 2. rename mongodb_replica_set to mongodb_replica_set.py +# 3. 
python test_mongodb_replica_set.py + +import mongodb_replica_set as mrs +import unittest, mock +from six.moves.urllib.parse import quote_plus +from copy import deepcopy + +class TestNoPatchingMongodbReplicaSet(unittest.TestCase): + def test_host_port_transformation(self): + unfixed = { + 'members': [ + {'host': 'foo.bar'}, + {'host': 'bar.baz', 'port': 1234}, + {'host': 'baz.bing:54321'} + ]} + fixed = { + 'members': [ + {'host': 'foo.bar:27017'}, + {'host': 'bar.baz:1234'}, + {'host': 'baz.bing:54321'} + ]} + + mrs.fix_host_port(unfixed) + self.assertEqual(fixed, unfixed) + + fixed_2 = deepcopy(fixed) + mrs.fix_host_port(fixed_2) + self.assertEqual(fixed, fixed_2) + + def test_member_id_managed(self): + new = [ + {'host': 'foo.bar', '_id': 1}, + {'host': 'bar.baz'}, + {'host': 'baz.bing'} + ] + old = [ + {'host': 'baz.bing', '_id': 0} + ] + + fixed = deepcopy(new) + mrs.set_member_ids(fixed, old) + + #test that each id is unique + unique_ids = {m['_id'] for m in fixed} + self.assertEqual(len(unique_ids), len(new)) + + #test that it "prefers" the "matching" one in old_members + self.assertEqual(fixed[0]['_id'], new[0]['_id']) + self.assertEqual(fixed[2]['_id'], old[0]['_id']) + self.assertIn('_id', fixed[1]) + + def test_mongo_uri_escaped(self): + host = username = password = auth_database = ':!@#$%/' + port = 1234 + uri = mrs.get_mongo_uri(host=host, port=port, username=username, password=password, auth_database=auth_database) + self.assertEqual(uri, "mongodb://{un}:{pw}@{host}:{port}/{db}".format( + un=quote_plus(username), pw=quote_plus(password), + host=quote_plus(host), port=port, db=quote_plus(auth_database), + )) + + +rs_id = 'a replset id' +members = [ + {'host': 'foo.bar:1234'}, + {'host': 'bar.baz:4321'}, +] +old_rs_config = { + 'version': 1, + '_id': rs_id, + 'members': [ + {'_id': 0, 'host': 'foo.bar:1234',}, + {'_id': 1, 'host': 'bar.baz:4321',}, + ] +} +new_rs_config = { + 'version': 2, + '_id': rs_id, + 'members': [ + {'_id': 0, 'host': 'foo.bar:1234',}, + {'_id': 1, 'host': 'bar.baz:4321',}, + {'_id': 2, 'host': 'baz.bing:27017',}, + ] +} +rs_config = { + 'members': [ + {'host': 'foo.bar', 'port': 1234,}, + {'host': 'bar.baz', 'port': 4321,}, + {'host': 'baz.bing', 'port': 27017,}, + ] +} + +def init_replset_mock(f): + get_replset_initialize_mock = mock.patch.object(mrs, 'get_replset', + side_effect=(None, deepcopy(new_rs_config))) + initialize_replset_mock = mock.patch.object(mrs, 'initialize_replset') + return get_replset_initialize_mock(initialize_replset_mock(f)) + +def update_replset_mock(f): + get_replset_update_mock = mock.patch.object(mrs, 'get_replset', + side_effect=(deepcopy(old_rs_config), deepcopy(new_rs_config))) + reconfig_replset_mock = mock.patch.object(mrs, 'reconfig_replset') + return get_replset_update_mock(reconfig_replset_mock(f)) + +@mock.patch.object(mrs, 'get_rs_config_id', return_value=rs_id) +@mock.patch.object(mrs, 'client', create=True) +@mock.patch.object(mrs, 'module', create=True) +class TestPatchingMongodbReplicaSet(unittest.TestCase): + + @update_replset_mock + def test_version_managed(self, _1, _2, module, *args): + # Version set automatically on initialize + mrs.update_replset(deepcopy(rs_config)) + new_version = module.exit_json.call_args[1]['config']['version'] + self.assertEqual(old_rs_config['version'], new_version - 1) + + @init_replset_mock + def test_doc_id_managed_on_initialize(self, _1, _2, module, *args): + #old_rs_config provided by init_replset_mock via mrs.get_replset(). 
#That returns None on the first call, so it falls through to get_rs_config_id(), + #which is also mocked. + mrs.update_replset(deepcopy(rs_config)) + new_id = module.exit_json.call_args[1]['config']['_id'] + self.assertEqual(rs_id, new_id) + + @update_replset_mock + def test_doc_id_managed_on_update(self, _1, _2, module, *args): + #old_rs_config provided by update_replset_mock via mrs.get_replset() + mrs.update_replset(deepcopy(rs_config)) + new_id = module.exit_json.call_args[1]['config']['_id'] + self.assertEqual(rs_id, new_id) + + @init_replset_mock + def test_initialize_if_necessary(self, initialize_replset, _2, module, *args): + mrs.update_replset(deepcopy(rs_config)) + self.assertTrue(initialize_replset.called) + #self.assertFalse(reconfig_replset.called) + + @update_replset_mock + def test_reconfig_if_necessary(self, reconfig_replset, _2, module, *args): + mrs.update_replset(deepcopy(rs_config)) + self.assertTrue(reconfig_replset.called) + #self.assertFalse(initialize_replset.called) + + @update_replset_mock + def test_not_changed_when_docs_match(self, _1, _2, module, *args): + rs_config = {'members': members} #This way the docs "match", but aren't identical + + mrs.update_replset(deepcopy(rs_config)) + changed = module.exit_json.call_args[1]['changed'] + self.assertFalse(changed) + + @update_replset_mock + def test_ignores_magic_given_full_doc(self, _1, _2, module, _3, get_rs_config_id, *args): + mrs.update_replset(deepcopy(new_rs_config)) + new_doc = module.exit_json.call_args[1]['config'] + self.assertEqual(new_doc, new_rs_config) + self.assertFalse(get_rs_config_id.called) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_playbooks.sh b/tests/test_playbooks.sh new file mode 100755 index 00000000000..52e4b3abafc --- /dev/null +++ b/tests/test_playbooks.sh @@ -0,0 +1,23 @@ +set -e + +ROOT_DIR=$PWD +cd playbooks +ROLE_DIRS=$(/bin/ls -d roles/*) +cat <<EOF >syntax-check-test.yml +- name: Play to test all roles + hosts: all + roles: +EOF +for role_dir in $ROLE_DIRS; do + echo " - $(basename $role_dir)" >> syntax-check-test.yml +done + +ansible-playbook -i localhost, --syntax-check syntax-check-test.yml + +output_dir="$PWD/test_output/env-dep" +mkdir -p $output_dir +ansible-playbook -i localhost, -c local --tags "common_directories,edxapp_cfg" edxapp.yml -e edxapp_user=`whoami` -e edxapp_app_dir=$output_dir -e edxapp_code_dir=$output_dir -e EDXAPP_EDXAPP_SECRET_KEY='DUMMY KEY' + +root_dir=$output_dir +environment_deployments="." +source $ROOT_DIR/tests/validate_templates.sh
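test_playbooks.sh above generates a throwaway play that lists every role directory, then runs ansible-playbook --syntax-check against it. A Python rendering of the same generation step (illustrative only; the role names are whatever exists under playbooks/roles):

```python
# Illustrative Python version of the play-generation loop in
# tests/test_playbooks.sh: one role entry per directory in playbooks/roles.
import os

def write_syntax_check_play(roles_dir="roles", out="syntax-check-test.yml"):
    header = "- name: Play to test all roles\n  hosts: all\n  roles:\n"
    with open(out, "w") as play:
        play.write(header)
        for role in sorted(os.listdir(roles_dir)):
            play.write(f"    - {role}\n")

# afterwards: ansible-playbook -i localhost, --syntax-check syntax-check-test.yml
```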
diff --git a/tests/validate_templates.sh b/tests/validate_templates.sh new file mode 100755 index 00000000000..f1b985cdc40 --- /dev/null +++ b/tests/validate_templates.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# This file should be sourced +# The 'root_dir' and 'environment_deployments' variables +# should be set when we source this. + +FAIL=0 +for e_d in $environment_deployments +do + GREP_DIR="$root_dir/${e_d}" + if ! egrep -q -r --include *.json '{{' "${GREP_DIR}"; then + echo "No un-expanded vars in ${e_d}" + else + echo "Found un-expanded vars in ${e_d}" + echo `egrep -r --include *.json '{{' "${GREP_DIR}"` + FAIL=1 + fi + + if ! egrep -qi -r --include *.json \'"False"\' "${GREP_DIR}"; then + echo "No quoted False." + else + echo "Found a quoted boolean in ${e_d}" + echo `egrep -i -r --include *.json "False" "${GREP_DIR}"` + FAIL=1 + fi + + if ! egrep -qi -r --include *.json '\"True\"' "${GREP_DIR}"; then + echo "No quoted True." + else + echo "Found a quoted boolean in ${e_d}" + echo `egrep -i -r --include *.json '\"True\"' "${GREP_DIR}"` + FAIL=1 + fi +done + +if [ "$FAIL" -eq 1 ] ; then + echo "Failing..." + exit 1 +fi diff --git a/util/README.rst b/util/README.rst new file mode 100644 index 00000000000..607ca366fd7 --- /dev/null +++ b/util/README.rst @@ -0,0 +1,73 @@ +How to add Dockerfiles to configuration file +############################################ + +The script that handles distributing build jobs across Travis CI shards relies +on the parsefiles\_config YAML file. This file contains a mapping from each +application that has a Dockerfile to its corresponding weight/rank. The rank +refers to the approximate running time of a Travis Docker build for that +application's Dockerfile. When adding a new Dockerfile to the configuration +repository, this configuration file needs to be manually updated in order to +ensure that the Dockerfile is also built. + +To modify configuration file: + +1. Edit the docker.mk file: +2. Modify docker\_test to include date commands. + + Replace + + :: + + $(docker_test)%: .build/%/Dockerfile.test + docker build -t $*:test -f $< . + + with + + :: + + $(docker_test)%: .build/%/Dockerfile.test + date + docker build -t $*:test -f $< . + date + +3. Replace the command that runs the dependency analyzer with a line to build + your Dockerfiles. + + For example, if adding Dockerfiles for ecommerce and rabbitmq, replace + + ``images:=$(if $(TRAVIS_COMMIT_RANGE),$(shell git diff --name-only $(TRAVIS_COMMIT_RANGE) | python util/parsefiles.py),$(all_images))`` + + with + + ``images:= ecommerce rabbitmq`` + +4. Replace the command that runs the balancing script with a line to build all + images. + + Replace + + ``docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))`` + + with + + ``docker.test.shard: $(foreach image,$(shell echo $(images) | tr ' ' '\n' | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))`` + +5. Commit and push to your branch. + +6. Wait for Travis CI to run the builds. + +7. Upon completion, examine the Travis CI logs to find where your Dockerfile + was built (search for "docker build -t"). Your Dockerfile should be built + by one of the build jobs with "MAKE_TARGET=docker.test.shard". Find the + amount of time the build took by comparing the output of the date command + before the build command starts and the date command after the build + command completes. + +8. Round build time to a whole number, and add it to the + configuration/util/parsefiles\_config.yml file. + +9. Undo steps 2, 3, 4 to revert to the original state of the docker.mk + file. + +10. Commit and push to your branch. Your Dockerfile should now be built as a + part of the Travis CI tests. diff --git a/util/ansible_msg.py b/util/ansible_msg.py new file mode 100755 index 00000000000..84b4c08d434 --- /dev/null +++ b/util/ansible_msg.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +"""Simple utility for deciphering Ansible jsonized task output.""" + + +import json +import sys + +if len(sys.argv) > 1: + f = open(sys.argv[1]) +else: + if sys.stdin.isatty(): + print("Copy one complete line of junk from ansible output, and pipe it to me.") + sys.exit() + f = sys.stdin + +junk = f.read() +if not junk: + print("No message to decode.") + sys.exit() + +# junk: +# '==> default: failed: [localhost] (item=/edx/app/edx_ansible/edx_ansible/requirements.txt) => {"cmd": "/edx/app/edx...'
+
+junk = junk.replace('\n', '')
+junk = junk[junk.index('=> {')+3:]
+junk = junk[:junk.rindex('}')+1]
+
+data = json.loads(junk)
+
+# Order these so that the most likely useful messages are last.
+GOOD_KEYS = ['cmd', 'module_stdout', 'module_stderr', 'warnings', 'msg', 'censored', 'stderr', 'stdout']
+IGNORE_KEYS = ['stdout_lines', 'stderr_lines', 'start', 'end', 'delta', 'changed', 'failed', 'rc', 'item']
+
+unknown_keys = set(data) - set(GOOD_KEYS) - set(IGNORE_KEYS)
+if unknown_keys:
+    print("== Unknown keys ======================")
+    for key in unknown_keys:
+        print(f"{key}: {data[key]!r:80}")
+
+for key in GOOD_KEYS:
+    if data.get(key):
+        print(f"== {key} ===========================")
+        print(data[key])
diff --git a/util/asg_event_notifications_util.py b/util/asg_event_notifications_util.py new file mode 100644 index 00000000000..9abc7902c72 --- /dev/null +++ b/util/asg_event_notifications_util.py @@ -0,0 +1,98 @@ +import boto3
+import click
+
+@click.group()
+def cli():
+    pass
+
+def get_asg_infos():
+    response = client.describe_auto_scaling_groups(MaxRecords=100)
+    auto_scaling_groups = response['AutoScalingGroups']
+
+    return auto_scaling_groups
+
+def get_asg_names():
+    asg_names = list()
+    for asg in get_asg_infos():
+        asg_names.append(asg['AutoScalingGroupName'])
+
+    return asg_names
+
+def get_asg_event_notifications(asg):
+    event_notifications = list()
+    response = client.describe_notification_configurations(AutoScalingGroupNames=[asg],
+                                                           MaxRecords=100)
+    notification_configs = response['NotificationConfigurations']
+    for notification in notification_configs:
+        event_notifications.append(notification['NotificationType'])
+
+    return event_notifications
+
+@click.command()
+def show_asg_event_notifications():
+    try:
+        for asg in get_asg_names():
+            event_notifications = get_asg_event_notifications(asg)
+
+            if event_notifications:
+                print("Event notifications: {} are set for ASG: {}".format(event_notifications,
+                                                                           asg))
+            else:
+                print(f"No Event Notifications found for ASG {asg}")
+    except Exception as e:
+        print(e)
+
+@click.command()
+@click.option('--topic_arn', help='The ARN of the Amazon SNS topic',
+              required=True)
+@click.option('--event',
+              help='The type of event that causes the notification to be sent',
+              default='autoscaling:EC2_INSTANCE_LAUNCH_ERROR')
+@click.option('--confirm', default=False, required=False, is_flag=True,
+              help='Set this to actually create the event notifications for the ASGs')
+def create_asg_event_notifications(
+        topic_arn,
+        event,
+        confirm,
+):
+    asg_names = get_asg_names()
+    asg_to_create_event_notifications = list()
+
+    for asg_name in asg_names:
+        event_notifications = get_asg_event_notifications(asg_name)
+
+        if event in event_notifications:
+            continue
+        else:
+            asg_to_create_event_notifications.append(asg_name)
+
+    if confirm is False:
+        print(f"Would have created the event notification for asgs {asg_to_create_event_notifications}")
+    else:
+        try:
+            for asg in asg_to_create_event_notifications:
+                response = client.put_notification_configuration(AutoScalingGroupName=asg,
+                                                                 TopicARN=topic_arn,
+                                                                 NotificationTypes=[event])
+
+                print("Created {} event notifications for auto scaling group {}".format(event,
+                                                                                        asg))
+        except Exception as e:
+            print(e)
+
+cli.add_command(show_asg_event_notifications)
+cli.add_command(create_asg_event_notifications)
+if __name__ == '__main__':
+    client = boto3.client('autoscaling')
+    cli()
diff --git a/util/aws_ip_locator/example b/util/aws_ip_locator/example new file mode 100644 index 00000000000..71a44c4281a --- /dev/null +++ b/util/aws_ip_locator/example @@ -0,0 +1,53 @@ +
+
+This program collects IPs from various AWS services such as EC2, RDS, ElastiCache, etc.
+
+
+Usage:
+. assume_role role 555555
+python ipcollector.py collect_ips --file_name ../../../edx-secure/optiv/ip_locator_inputs/edx.json
+
+Example file input:
+
+[{
+        "title": "External ip list",
+        "external_hostnames": [
+            "some-site.com",
+            "courses.edx.org"
+        ]
+    },
+    {
+        "title": "Internal ip list",
+        "ec2_instance_name_tags": [{
+                "display_name": "display name 1",
+                "aws_tag_name": "some aws ec2 instance tag"
+            },
+            {
+                "display_name": "display name 2",
+                "aws_tag_name": "some other tag"
+            }
+        ],
+        "ec2_elb_name_tags": [{
+                "display_name": "some-elb",
+                "elb_name": "some-elb-name"
+            },
+            {
+                "display_name": "(Expected unreachable) my-other-elb",
+                "elb_name": "some-other-elb"
+            }
+        ],
+
+        "elasticache_clusters": [{
+            "display_name": "redis",
+            "cluster_id": "some redis instance id"
+        }],
+        "rds_instances": [{
+            "display_name": "some interesting RDS",
+            "instance_id": "actual internal instance name for interesting rds"
+        }],
+        "static_entries": [{
+            "display_name": "Static report entry",
+            "display_value": "Static report value"
+        }]
+    }
+]
diff --git a/util/aws_ip_locator/ipcollector.py b/util/aws_ip_locator/ipcollector.py new file mode 100644 index 00000000000..a4350aa39a0 --- /dev/null +++ b/util/aws_ip_locator/ipcollector.py @@ -0,0 +1,158 @@ +import boto3
+import click
+import socket
+import json
+
+@click.group()
+def cli():
+    pass
+
+@click.command()
+@click.option('--file_name',
+              required=True,
+              help="""
+              file containing the tags, names, etc. that you would like to find ips for; see the example file for the input format""")
+def collect_ips(file_name):
+    output_json = json.load(open(file_name))
+
+    for entry in output_json:
+        print_header(entry['title'])
+
+        external_hostnames_key = 'external_hostnames'
+        if external_hostnames_key in entry:
+            external_hostnames = entry[external_hostnames_key]
+            for hostname in external_hostnames:
+                print_line_item(hostname, get_ip_for_hostname(hostname))
+
+        ec2_instance_name_tags_key = 'ec2_instance_name_tags'
+        if ec2_instance_name_tags_key in entry:
+            ec2_name_tags = entry[ec2_instance_name_tags_key]
+            for pair in ec2_name_tags:
+                display_name = pair['display_name']
+                aws_tag_name = pair['aws_tag_name']
+                ip = get_instance_ip_by_name_tag(aws_tag_name)
+                print_line_item(display_name, ip)
+
+        ec2_elb_name_tags_key = 'ec2_elb_name_tags'
+        if ec2_elb_name_tags_key in entry:
+            ec2_elb_name_tags = entry[ec2_elb_name_tags_key]
+            for pair in ec2_elb_name_tags:
+                display_name = pair['display_name']
+                elb_name = pair['elb_name']
+                ip = get_elb_ip_by_elb_name(elb_name)
+                print_line_item(display_name, ip)
+
+        elasticache_clusters_key = 'elasticache_clusters'
+        if elasticache_clusters_key in entry:
+            elasticache_clusters = entry[elasticache_clusters_key]
+            for cluster in elasticache_clusters:
+                display_name = cluster['display_name']
+                cluster_id = cluster['cluster_id']
+                print_line_item(display_name, get_elasticache_ip_by_cluster_id(cluster_id))
+
+        rds_instances_key = 'rds_instances'
+        if rds_instances_key in entry:
+            rds_instances = entry[rds_instances_key]
+            for instance in rds_instances:
+                display_name = instance['display_name']
+                instance_id = None
+                if 'instance_id' in instance:
+                    instance_id = instance['instance_id']
+                    print_line_item(display_name, get_rds_ip_by_instance_id(instance_id))
+                elif 'cluster_id' in instance:
+                    cluster_id = instance['cluster_id']
+                    instance_id = get_writer_instance_id_by_cluster_id(cluster_id)
+                    print_line_item(display_name, get_rds_ip_by_instance_id(instance_id))
+                else:
+                    raise ValueError("Can't locate RDS instance without instance_id or cluster_id")
+
+        static_entries_key = 'static_entries'
+        if static_entries_key in entry:
+            static_entries = entry[static_entries_key]
+            for item in static_entries:
+                display_name = item['display_name']
+                display_value = item['display_value']
+                print_line_item(display_name, display_value)
+
+
+cli.add_command(collect_ips)
+
+def get_ip_for_hostname(hostname):
+    return socket.gethostbyname(hostname)
+
+def print_header(name):
+    header = """
+============================
+{0}
+============================"""
+    print(header.format(name))
+
+def print_line_item(target, ip):
+    line = "[ * ] {0} - {1}"
+    print(line.format(target, ip))
+
+def get_instance_ip_by_name_tag(value):
+    client = boto3.client('ec2')
+    filters = [{
+        'Name': 'tag:Name',
+        'Values': [value]
+    }]
+
+    response = client.describe_instances(Filters=filters)
+
+    for r in response['Reservations']:
+        for i in r['Instances']:
+            if i['State']['Name'] == 'running':
+                ip = i['PrivateIpAddress']
+                return ip
+
+def get_elb_ip_by_elb_name(elb_name):
+    client = boto3.client('elb')
+    response = client.describe_load_balancers(
+        LoadBalancerNames=[
+            elb_name,
+        ]
+    )
+    hostname = response['LoadBalancerDescriptions'][0]['DNSName']
+    return get_ip_for_hostname(hostname)
+
+def get_elasticache_ip_by_cluster_id(cluster_id):
+    client = boto3.client('elasticache')
+    response = client.describe_cache_clusters(
+        CacheClusterId=cluster_id,
+        ShowCacheNodeInfo=True,
+    )
+    hostname = response['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address']
+    return get_ip_for_hostname(hostname)
+
+def get_writer_instance_id_by_cluster_id(cluster_id):
+    client = boto3.client('rds')
+    response = client.describe_db_clusters(
+        DBClusterIdentifier=cluster_id
+    )
+    members = response['DBClusters'][0]['DBClusterMembers']
+    for member in members:
+        if member['IsClusterWriter']:
+            return member['DBInstanceIdentifier']
+    raise ValueError('Could not locate a writer instance for the given cluster_id')
+
+def get_rds_ip_by_instance_id(instance_id):
+    client = boto3.client('rds')
+    response = client.describe_db_instances(
+        DBInstanceIdentifier=instance_id,
+    )
+    hostname = response['DBInstances'][0]['Endpoint']['Address']
+    return get_ip_for_hostname(hostname)
+
+if __name__ == '__main__':
+    cli()
diff --git a/util/aws_ip_locator/requirements.txt b/util/aws_ip_locator/requirements.txt new file mode 100644 index 00000000000..c98674c95f8 --- /dev/null +++ b/util/aws_ip_locator/requirements.txt @@ -0,0 +1,11 @@ +boto3==1.9.0
+botocore==1.12.0
+click==6.7
+docutils==0.14
+futures==3.2.0
+jmespath==0.9.3
+netaddr==0.7.19
+python-dateutil==2.7.3
+s3transfer==0.1.13
+six==1.11.0
+urllib3==1.24.2
diff --git a/util/bake_config.sh b/util/bake_config.sh new file mode 100755 index 00000000000..545a9bc456c --- /dev/null +++ b/util/bake_config.sh @@ -0,0 +1,43 @@ +#!/bin/bash
+
+# For instructions on how to use this script see https://openedx.atlassian.net/wiki/spaces/EdxOps/pages/390627556/How+to+run+baked+config+on+your+laptop
+
+# Exit on fail
+set -e
+
+# Enforce required envs
+: ${WORKSPACE?"Need to set WORKSPACE"}
+: ${CONFIG_RENDERING_TARGET?"Need to set CONFIG_RENDERING_TARGET"}
+
+# Optional envs you can override if you wish to render config for different EDPs;
+# these are expected to be comma separated with no spaces, see defaults.
+ENVIRONMENT_DEPLOYMENTS=${ENVIRONMENT_DEPLOYMENTS:=stage-edx,prod-edx,prod-edge,developer-sandbox}
+PLAYS=${PLAYS:=edxapp,analyticsapi,credentials,ecommerce,discovery,ecomworker,insights,registrar,notes}
+
+rm -rf $CONFIG_RENDERING_TARGET
+cd $WORKSPACE/configuration/playbooks
+
+for ENVIRONMENT_DEPLOYMENT in $(echo $ENVIRONMENT_DEPLOYMENTS | sed "s/,/ /g")
+do
+    ENVIRONMENT="$(echo $ENVIRONMENT_DEPLOYMENT | cut -d - -f 1 )"
+    DEPLOY="$(echo $ENVIRONMENT_DEPLOYMENT | cut -d - -f 2 )"
+    VARS="-e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${DEPLOY}.yml -e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${DEPLOY}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml"
+
+    if [ "${ENVIRONMENT_DEPLOYMENT}" == "developer-sandbox" ]; then
+        VARS="-e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml -e ansible_ec2_public_ipv4=LINTING"
+    fi
+
+    mkdir -p $CONFIG_RENDERING_TARGET/$ENVIRONMENT_DEPLOYMENT
+
+    # PLAYS for Environment/Deployment
+    for PLAY in $(echo $PLAYS | sed "s/,/ /g")
+    do
+        if [ "$PLAY" == "edxapp" ]; then
+            # LMS / CMS for Environment/Deployment
+            ansible-playbook --become-user=$(whoami) -vvv -c local -i 'localhost,' --tags edxapp_cfg_yaml_only ./edxapp.yml $VARS -e edxapp_user=$(whoami) -e common_web_group=$(whoami) -e COMMON_CFG_DIR=$CONFIG_RENDERING_TARGET/$ENVIRONMENT_DEPLOYMENT
+        else
+            # All other IDAs
+            ansible-playbook --become-user=$(whoami) -vvv -c local -i 'localhost,' --tags install:app-configuration ./$PLAY.yml $VARS -e COMMON_CFG_DIR=$CONFIG_RENDERING_TARGET/$ENVIRONMENT_DEPLOYMENT
+        fi
+    done
+done
diff --git a/util/check_for_key_collisions/README.md b/util/check_for_key_collisions/README.md new file mode 100644 index 00000000000..5a7be77a239 --- /dev/null +++ b/util/check_for_key_collisions/README.md @@ -0,0 +1,4 @@ +Finds keys in a set of YAML/JSON files that would collide when Ansible merges them
+
+USAGE:
+python check_for_key_collisions/check_for_key_collisions.py --files file1.yml --files file2.json \ No newline at end of file
diff --git a/util/check_for_key_collisions/check_for_key_collisions.py b/util/check_for_key_collisions/check_for_key_collisions.py new file mode 100644 index 00000000000..16a4f73afc0 --- /dev/null +++ b/util/check_for_key_collisions/check_for_key_collisions.py @@ -0,0 +1,39 @@ +import click
+import yaml
+import json
+from collections import defaultdict
+
+@click.command()
+@click.option('--files', '-m', multiple=True)
+def check_for_yaml_key_collisions(files):
+    values_for_keys = defaultdict(list)
+    for file_path in files:
+        content = None
+        if file_path.endswith(".yml") or file_path.endswith(".yaml"):
+            with open(file_path) as stream:
+                content = yaml.safe_load(stream)
+        elif file_path.endswith(".json"):
+            with open(file_path) as read_file:
+                content = json.load(read_file)
+        for key, value in content.items():
+            values_for_keys[key].append(value)
+
+    collisions = {}
+
+    for key, value in values_for_keys.items():
+        if len(value) > 1:
+            collisions[key] = value
+
+    if len(collisions) > 0:
+        print(str.format("Found key collisions: {}", len(collisions)))
+        for key, value in collisions.items():
+            print(str.format("{} {}", key, value))
+        exit(1)
+    else:
+        print("No collisions found")
+        exit(0)
+
+if __name__ == '__main__':
+    check_for_yaml_key_collisions()
diff --git a/util/check_for_key_collisions/requirements.txt b/util/check_for_key_collisions/requirements.txt new file mode 100644 index 00000000000..28655b36eb1 --- /dev/null +++ b/util/check_for_key_collisions/requirements.txt @@ -0,0 +1,2 @@ +Click==7.0
+PyYAML==5.4.1
diff --git a/util/check_rds_configs/check_rds_configs.py b/util/check_rds_configs/check_rds_configs.py new file mode 100644 index 00000000000..86e84eb2fd5 --- /dev/null +++ b/util/check_rds_configs/check_rds_configs.py @@ -0,0 +1,172 @@ +import boto3
+from botocore.config import Config
+import click
+
+tags_key_list = ["deployment", "environment", "cluster"]
+
+def get_db_instances():
+    """
+    Returns:
+        List of provisioned RDS instances
+    """
+    return rds.describe_db_instances()['DBInstances']
+
+def get_db_clusters():
+    """
+    Returns:
+        List of provisioned RDS clusters
+    """
+    return rds.describe_db_clusters()['DBClusters']
+
+def get_db_parameters(parameter_group_type, parameter_group_name, marker):
+    """
+    Returns:
+        The detailed parameter list for a particular DB parameter group,
+        using marker as the pagination token since at most 100 records are
+        returned per call.
+    """
+    if parameter_group_type == "instance":
+        response = rds.describe_db_parameters(
+            DBParameterGroupName=parameter_group_name,
+            Marker=marker)
+    elif parameter_group_type == "cluster":
+        response = rds.describe_db_cluster_parameters(
+            DBClusterParameterGroupName=parameter_group_name,
+            Marker=marker)
+    return response
+
+
+def check_slow_query_logs(parameter_group_type, parameter_group_name):
+    slow_log_enabled = False
+
+    marker = ""
+
+    while True:
+        if marker is None:
+            break
+
+        response = get_db_parameters(parameter_group_type, parameter_group_name, marker)
+        marker = response.get('Marker')
+        parameters = response.get('Parameters')
+
+        for param in parameters:
+            if 'slow_query_log' in param['ParameterName']:
+                if 'ParameterValue' in param and param['ParameterValue'] == '1':
+                    slow_log_enabled = True
+                break
+
+    return slow_log_enabled
+
+def check_tags(tags_list, db, tags):
+    status = 0
+    if tags:
+        for tag in tags:
+            if not tag["Key"] in tags_key_list:
+                tags_list.append(db)
+                status = 1
+        return status, tags_list
+    else:
+        status = 1
+        tags_list.append(db)
+        return status, tags_list
+
+@click.command()
+@click.option('--db_engine', help='Removed, left for compatibility')
+@click.option('--ignore', type=str, multiple=True, help='RDS Instances to ignore')
+def cli(db_engine, ignore):
+    ignore_rds = list(ignore)
+    slow_query_logs_disabled_rds = []
+    instances_out_of_sync_with_instance_parameters = []
+    instances_out_of_sync_with_cluster_parameters = []
+    cluster_with_disabled_snapshot_tags = []
+    instances_with_disabled_performance_insights = []
+    instances_without_tags = []
+    clusters_without_tags = []
+    exit_status = 0
+
+    db_instances = get_db_instances()
+    db_clusters = get_db_clusters()
+
+    db_instance_parameter_groups = {}
+
+    for instance in db_instances:
+        arn = instance['DBInstanceArn']
+        tags = rds.list_tags_for_resource(ResourceName=arn)['TagList']
+        db_identifier = instance['DBInstanceIdentifier']
+        print("Checking tags on DB instance {}".format(db_identifier))
+        exit_status, instances_without_tags = check_tags(instances_without_tags, db_identifier, tags)
+
+        if db_identifier not in ignore_rds and "test" not in db_identifier:
+            db_instance_parameter_groups[db_identifier] = {'instance': instance['DBParameterGroups'][0]}
+
+            if instance['PerformanceInsightsEnabled'] == False:
+                instances_with_disabled_performance_insights.append(instance['DBInstanceIdentifier'])
+
+    for cluster in db_clusters:
+        arn = cluster['DBClusterArn']
+        db_cluster_identifier = cluster['DBClusterIdentifier']
+        tags = rds.list_tags_for_resource(ResourceName=arn)['TagList']
+        print("Checking cluster tags on DB cluster {}".format(db_cluster_identifier))
+        if db_cluster_identifier not in ignore_rds and "test" not in db_cluster_identifier:
+            exit_status, clusters_without_tags = check_tags(clusters_without_tags, db_cluster_identifier, tags)
+            if cluster['CopyTagsToSnapshot'] == False:
+                cluster_with_disabled_snapshot_tags.append(cluster['DBClusterIdentifier'])
+
+            for instance in cluster['DBClusterMembers']:
+                db_identifier = instance['DBInstanceIdentifier']
+                print("Checking tags on cluster DB instance {}".format(db_identifier))
+                if db_identifier not in ignore_rds and "test" not in db_identifier:
+                    db_instance_parameter_groups[db_identifier]['cluster'] = cluster['DBClusterParameterGroup']
+                    if instance["DBClusterParameterGroupStatus"] != "in-sync":
+                        instances_out_of_sync_with_cluster_parameters.append(db_identifier)
+
+    for db_identifier, parameter_groups in db_instance_parameter_groups.items():
+        print("Checking parameter groups on DB {}".format(db_identifier))
+        instance_parameter_group_name = parameter_groups['instance']['DBParameterGroupName']
+        if parameter_groups['instance']['ParameterApplyStatus'] != "in-sync":
+            instances_out_of_sync_with_instance_parameters.append(db_identifier)
+            exit_status = 1
+
+        # First check whether slow query logs are enabled in the instance parameter
+        # group, which takes precedence over the cluster-level parameter group.
+        slow_query_logs_enabled = check_slow_query_logs('instance', instance_parameter_group_name)
+
+        if 'cluster' in parameter_groups.keys():
+            cluster_parameter_group_name = parameter_groups['cluster']
+            # If slow query logs weren't enabled by the instance-level parameter
+            # group, see if they are enabled at the cluster level.
+            if not slow_query_logs_enabled:
+                slow_query_logs_enabled = check_slow_query_logs('cluster', cluster_parameter_group_name)
+
+        if not slow_query_logs_enabled:
+            exit_status = 1
+            slow_query_logs_disabled_rds.append(db_identifier)
+
+    if cluster_with_disabled_snapshot_tags:
+        exit_status = 1
+
+    print(f"Slow query logs are disabled for RDS Instances\n{slow_query_logs_disabled_rds}")
+    print()
+    print(f"Instance parameter groups out of sync/pending reboot for RDS Instances\n{instances_out_of_sync_with_instance_parameters}")
+    print()
+    print(f"Cluster parameter groups out of sync/pending reboot for RDS Instances\n{instances_out_of_sync_with_cluster_parameters}")
+    print()
+    print(f"Snapshot tags are disabled for Clusters\n{cluster_with_disabled_snapshot_tags}")
+    print()
+    print(f"Performance Insights is disabled for RDS Instances\n{instances_with_disabled_performance_insights}")
+    print()
+    print(f"Tags are missing for the RDS Instances\n{instances_without_tags}")
+    print()
+    print(f"Tags are missing for the RDS Clusters\n{clusters_without_tags}")
+    print()
+    exit(exit_status)
+
+if __name__ == '__main__':
+    rds = boto3.client('rds', config=Config(connect_timeout=5, read_timeout=60, retries={'max_attempts': 15}))
+    cli()
diff --git a/util/check_rds_configs/requirements.txt b/util/check_rds_configs/requirements.txt new file mode 120000 index 00000000000..68c7b6c4342 --- /dev/null +++ b/util/check_rds_configs/requirements.txt @@ -0,0 +1 @@ +../jenkins/requirements.txt \ No newline at end of file
diff --git a/util/cloudflare/by_origin_purger/README.md b/util/cloudflare/by_origin_purger/README.md new file mode 100644 index 00000000000..e001dd35204 --- /dev/null +++ b/util/cloudflare/by_origin_purger/README.md @@ -0,0 +1,34 @@ +
+
+Cloudflare cache keys include the origin, so in order to purge assets with
+cached CORS headers you need to purge Cloudflare cache assets by origin.
+
+
+
+Build the target list like so:
+aws s3 ls s3://bucket-url/path --recursive | awk '{print $4}' > targets
+
+Make sure this seems reasonable...
+cat targets
+cat targets | wc -l
+
+ python purger.py --origin https://example.edu --cloudflare_site_url https://cloudflare-example.net --target_path targets
+ Will purge: https://cloudflare-example.net/headerCCE-V230100/headerCCE-V230100.m3u8 at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V230400/headerABC-V230400_3_49.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V230600/headerABC-V230600_5_13.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V230700/headerABC-V230700_6_46.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V231100/headerABC-V231100_1_5.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V231200/headerABC-V231200_6_1.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V231700/headerABC-V231700_2_11.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V231900/headerABC-V231900_6_12.ts at origin https://example.edu and 500 others like it. Add --confirm to execute.
+ Will purge: https://cloudflare-example.net/headerABC-V232000/headerABC-V232000_6_28.ts at origin https://example.edu and 51 others like it. Add --confirm to execute.
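+
+Once the dry run looks right, re-run the same command with --confirm to issue the purge requests: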
+
+ python purger.py --origin https://example.edu --cloudflare_site_url https://cloudflare-example.net --target_path targets --confirm
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
+ {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []}
diff --git a/util/cloudflare/by_origin_purger/purger.py b/util/cloudflare/by_origin_purger/purger.py new file mode 100644 index 00000000000..6b5d3a126e5 --- /dev/null +++ b/util/cloudflare/by_origin_purger/purger.py @@ -0,0 +1,53 @@ +import requests
+import click
+
+
+
+def wrap(cloudflare_site_url, s3_asset_path, origin):
+    url = str.format("{}/{}", cloudflare_site_url, s3_asset_path)
+    return {
+        "url": url,
+        "headers": {
+            "Origin": origin
+        }
+    }
+
+def divide_chunks(list_to_divide, number_in_chunk):
+    for index in range(0, len(list_to_divide), number_in_chunk):
+        yield list_to_divide[index:index + number_in_chunk]
+
+@click.command()
+@click.option('--cloudflare_email', required=True, envvar='CLOUDFLARE_EMAIL')
+@click.option('--cloudflare_api_key', required=True, envvar='CLOUDFLARE_API_KEY')
+@click.option('--cloudflare_zone_id', required=True, envvar='CLOUDFLARE_ZONE_ID', help='Get this from the zones API endpoint')
+@click.option('--origin', required=True)
+@click.option('--cloudflare_site_url')
+@click.option('--target_path', required=True)
+@click.option('--confirm', is_flag=True)
+def purge(cloudflare_email, cloudflare_api_key, cloudflare_zone_id, origin, cloudflare_site_url, target_path, confirm):
+    with open(target_path) as f:
+        lines = f.readlines()
+
+    lines = [x.strip() for x in lines]
+    for index, s3_asset_path in enumerate(lines):
+        lines[index] = wrap(cloudflare_site_url, s3_asset_path, origin)
+
+    chunk_size = 500
+    chunks = divide_chunks(lines, chunk_size)
+    for chunk in chunks:
+        if not confirm:
+            print(str.format("Will purge: {} at origin {} and {} others like it. Add --confirm to execute.", chunk[0]['url'], chunk[0]['headers']['Origin'], len(chunk)))
+        else:
+            headers = {'X-Auth-Email': cloudflare_email,
+                       'X-Auth-Key': cloudflare_api_key,
+                       'Content-Type': 'application/json'}
+            payload = {
+                "files": chunk
+            }
+            url = str.format("https://api.cloudflare.com/client/v4/zones/%7Bcloudflare_zone_id%7D/purge_cache", cloudflare_zone_id=cloudflare_zone_id)
+            response = requests.delete(url, headers=headers, json=payload)
+            print(response.json())
+
+if __name__ == '__main__':
+    purge()
diff --git a/util/cloudflare/by_origin_purger/requirements.txt b/util/cloudflare/by_origin_purger/requirements.txt new file mode 120000 index 00000000000..8a8582a57da --- /dev/null +++ b/util/cloudflare/by_origin_purger/requirements.txt @@ -0,0 +1 @@ +../../jenkins/requirements-cloudflare.txt \ No newline at end of file
diff --git a/util/cluster_instance_monitoring.py b/util/cluster_instance_monitoring.py new file mode 100644 index 00000000000..af38b479ca7 --- /dev/null +++ b/util/cluster_instance_monitoring.py @@ -0,0 +1,93 @@ +import boto3
+import argparse
+import sys
+import yaml
+from pprint import pprint
+
+def find_active_instances(cluster_file, region):
+    """
+    Determines if a given cluster has at least one ASG and at least one active instance.
+
+    Input:
+        cluster_file: a YAML file containing a list of triples that specify the clusters to monitor.
+        The keys of each entry are 'env', 'deployment', and 'cluster', specifying the environment,
+        deployment, and cluster to find ASGs and active instances for.
+    """
+    with open(cluster_file) as f:
+        cluster_map = yaml.safe_load(f)
+
+    asg = boto3.client('autoscaling', region)
+    all_groups = asg.describe_auto_scaling_groups(MaxRecords=100)
+
+    # dictionary keyed by the environment/deployment/cluster triple; the value is a list of the ASGs that match the triple
+    all_matching_asgs = {}
+
+    # all the triples for which an autoscaling group does not exist
+    not_matching_triples = []
+
+    # check if there exists at least one ASG for each triple
+    for triple in cluster_map:
+        # the ASGs that match this particular triple
+        cluster_asgs = []
+
+        for g in all_groups['AutoScalingGroups']:
+            match_env = False
+            match_deployment = False
+            match_cluster = False
+            for tag in g['Tags']:
+                if tag['Key'] == 'environment' and tag['Value'] == triple['env']:
+                    match_env = True
+                if tag['Key'] == 'deployment' and tag['Value'] == triple['deployment']:
+                    match_deployment = True
+                if tag['Key'] == 'cluster' and tag['Value'] == triple['cluster']:
+                    match_cluster = True
+            if match_env and match_cluster and match_deployment:
+                cluster_asgs += [g]
+
+        if not cluster_asgs:
+            not_matching_triples += [triple]
+        else:
+            triple_str = triple['env'] + '-' + triple['deployment'] + '-' + triple['cluster']
+            all_matching_asgs[triple_str] = cluster_asgs
+
+    # the triples that have no active instances
+    no_active_instances_triples = []
+
+    # check that each triple has at least one active instance in at least one of its ASGs
+    for triple in all_matching_asgs:
+        asgs = all_matching_asgs[triple]
+        triple_has_active_instances = False
+        for asg in asgs:
+            for instance in asg['Instances']:
+                if instance['LifecycleState'] == 'InService':
+                    triple_has_active_instances = True
+        if not triple_has_active_instances:
+            no_active_instances_triples += [triple]
+
+    if no_active_instances_triples or not_matching_triples:
+        if not_matching_triples:
+            print('Fail. There are no autoscaling groups found for the following cluster(s):')
+            pprint(not_matching_triples)
+        if no_active_instances_triples:
+            print("Fail. There are no active instances for the following cluster(s)")
+            for triple in no_active_instances_triples:
+                print('environment: ' + triple.split('-')[0])
+                print('deployment: ' + triple.split('-')[1])
+                print('cluster: ' + triple.split('-')[2])
+                print('----')
+        sys.exit(1)
+
+    print("Success. ASGs with active instances were found for all of the cluster triples.")
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-f', '--file', help='YAML file of env/deployment/cluster triples that we want to find active instances for', required=True)
+    parser.add_argument('-r', '--region', help="Region that we want to find ASGs and active instances in", default='us-east-1')
+    args = parser.parse_args()
+
+    find_active_instances(args.file, args.region)
+
diff --git a/util/config/merge_json_to_yaml b/util/config/merge_json_to_yaml new file mode 100755 index 00000000000..f0bed725617 --- /dev/null +++ b/util/config/merge_json_to_yaml @@ -0,0 +1,20 @@ +#! /usr/bin/env python
+
+import json
+import click
+import yaml
+import sys
+
+@click.command()
+@click.argument('files', nargs=-1, type=click.Path())
+def join_json(files):
+    """ This script merges multiple JSON documents into a single namespace, then dumps the result as YAML """
+    data = dict()
+    for file in files:
+        click.echo('# ingested %s' % file)
+        with open(file) as filehandle:
+            data.update(json.load(filehandle))
+    print(yaml.safe_dump(data))
+
+if __name__ == '__main__':
+    join_json()
diff --git a/util/course-permutation-tool/README.rst b/util/course-permutation-tool/README.rst new file mode 100644 index 00000000000..f12dfaa2be6 --- /dev/null +++ b/util/course-permutation-tool/README.rst @@ -0,0 +1,11 @@ +Course Permutation Tool for Developer Environments
+##################################################
+
+This is a tool to add default courses to developer environments, specifically for
+devstack and sandboxes. The goal is for developers to have access to courses with
+metadata that matches that in production, and to provide a way to generate course
+permutations. It will consist of a permutations JSON file, which
+includes permutation options and default values, and also a Python script that will
+generate the final file that will get passed into a course creation script.
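+
+As a rough sketch of the intended flow (the key names follow
+permutations.json, but the generator script itself is not final), the
+permutations could be expanded with ``itertools.product``::
+
+    import itertools
+    import json
+
+    with open("permutations.json") as f:
+        spec = json.load(f)["permutation_data"]
+
+    # One course definition per combination of permutation values.
+    keys = sorted(spec)
+    courses = [dict(zip(keys, combo))
+               for combo in itertools.product(*(spec[k] for k in keys))]
+    print(json.dumps(courses, indent=2))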
+
+More info to come once finalized
diff --git a/util/course-permutation-tool/permutations.json b/util/course-permutation-tool/permutations.json new file mode 100644 index 00000000000..ccee15e0e23 --- /dev/null +++ b/util/course-permutation-tool/permutations.json @@ -0,0 +1,54 @@ +{
+    "permutation_data": {
+        "start": [
+            "past",
+            "future",
+            null
+        ],
+        "end": [
+            "past",
+            "future",
+            null
+        ],
+        "seats": [
+            [
+                "audit"
+            ],
+            [
+                "verified"
+            ],
+            [
+                "audit",
+                "verified"
+            ],
+            [],
+            null
+        ],
+        "display_name": [
+            "International Project Management",
+            "Cybersecurity Fundamentals",
+            "",
+            null
+        ],
+        "mobile_available": [
+            true,
+            false,
+            null
+        ]
+    },
+    "default_data": {
+        "start": "past",
+        "end": "future",
+        "seats": [
+            {
+                "type": [
+                    "audit",
+                    "verified"
+                ],
+                "upgrade_deadline": "future"
+            }
+        ],
+        "display_name": "International Project Management",
+        "mobile_available": true
+    }
+}
diff --git a/util/create_data_czar/assign_czar_org_groups.py b/util/create_data_czar/assign_czar_org_groups.py new file mode 100755 index 00000000000..a3336fa570b --- /dev/null +++ b/util/create_data_czar/assign_czar_org_groups.py @@ -0,0 +1,56 @@ +"""
+assign_czar_org_groups.py
+
+Assigns data czars to the IAM groups for their org based on the configuration specified in the analytics-exporter
+repository, https://github.com/openedx/edx-analytics-exporter/blob/master/sample-config.yaml
+
+Assumes that a group for the org has already been created using the create_org_data_czar_policy.py script.
+
+Assumes that the data czar's email is their IAM user name.
+
+Assumes that org names are consistent in S3, the YAML config file, and IAM.
+
+"""
+
+import argparse
+import boto
+import yaml
+import sys
+
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-f', '--file', help='Path to the Analytics YAML file containing '
+                                         'the organization meta-data which is located ')
+parser.add_argument('-p', '--profile', help='The IAM profile to use when '
+                                            'adding user to groups')
+args = parser.parse_args()
+
+
+org_group_name_template = "edx-course-data-{org}"
+
+with open(args.file) as config:
+    data = yaml.safe_load(config)
+
+
+iam_connection = boto.connect_iam(profile_name=args.profile)
+
+for group, group_info in data['organizations'].items():
+    print(f"Adding {group_info['recipients']} to group {group}.")
+
+    # Add to the group providing general permissions for all data czars.
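+    # These group additions are best-effort: any error (for example, a user
+    # that does not exist yet in IAM) is printed and the script moves on.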
+    try:
+        for user in group_info['recipients']:
+            iam_connection.add_user_to_group('analytics-edx-course-data-s3-ro', user)
+    except Exception as e:
+        print(e)
+
+    # Add to the org specific group
+    try:
+        for user in group_info['recipients']:
+            iam_connection.add_user_to_group(org_group_name_template.format(org=group), user)
+    except Exception as e:
+        print(e)
+
+sys.exit(0)
diff --git a/util/create_data_czar/create_data_czar.py b/util/create_data_czar/create_data_czar.py new file mode 100755 index 00000000000..74898975b14 --- /dev/null +++ b/util/create_data_czar/create_data_czar.py @@ -0,0 +1,58 @@ +import boto3
+import argparse
+import gnupg
+import sys
+
+# Assumes you have GPG already installed
+# Assumes that the Data Czars already have your public key
+# Assumes that .boto is configured with the edX Prod account
+
+# Parser
+parser = argparse.ArgumentParser(description="Username of Data Czar.")
+parser.add_argument('-u', '--user', help='Email of Data Czar', required=True)
+parser.add_argument('-f', '--file', help='Public Key file', required=True)
+parser.add_argument('--credentials-only', help='Only create new credentials', default=False, action='http://github.com/store_true')
+parser.add_argument('-o', '--orgs', nargs='*', help='Name of the org(s) as a list; the user needs to be a member', default=None)
+parser.add_argument('-c', '--creator', help='Name of the creator', default=None)
+args = parser.parse_args()
+
+# Import Data Czar GPG Key
+gpg = gnupg.GPG()
+key_data = open(args.file).read()
+import_result = gpg.import_keys(key_data)
+
+# Connect to AWS and create account
+iam = boto3.client('iam')
+
+if not args.credentials_only:
+    user_response = iam.create_user(UserName=args.user)
+    if args.creator:
+        tag_response = iam.tag_user(UserName=args.user, Tags=[{'Key': 'Creator', 'Value': args.creator}])
+
+key_response = iam.create_access_key(UserName=args.user)
+
+# Add user to group edx-s3bucket-course-data-readonly
+iam.add_user_to_group(GroupName='edx-s3bucket-course-data-readonly', UserName=args.user)
+
+# Add user to its respective org group(s)
+if args.orgs:
+    for org in args.orgs:
+        user_org = 'edx-course-data-' + org.lower()
+        iam.add_user_to_group(GroupName=user_org, UserName=args.user)
+
+# Create AWS Cred String
+key = key_response['AccessKey']
+credstring = str(f'AWS_ACCESS_KEY_ID = {key["AccessKeyId"]} \nAWS_SECRET_ACCESS_KEY = {key["SecretAccessKey"]}')
+
+# Encrypt file
+encrypted_data = gpg.encrypt(credstring, args.user, always_trust=True)
+encrypted_string = str(encrypted_data)
+gpgfile = open(args.user + '-credentials.txt.gpg', 'w+')
+gpgfile.write(encrypted_string)
+
+print('ok: ', encrypted_data.ok)
+print('status: ', encrypted_data.status)
+print('stderr: ', encrypted_data.stderr)
+
+if 'error' in encrypted_data.stderr:
+    sys.exit(1)
diff --git a/util/create_data_czar/create_org_data_czar_policy.py b/util/create_data_czar/create_org_data_czar_policy.py new file mode 100755 index 00000000000..34794681c71 --- /dev/null +++ b/util/create_data_czar/create_org_data_czar_policy.py @@ -0,0 +1,88 @@ +"""
+create_org_data_czar_policy.py
+
+Creates an IAM group for an edX org and applies an S3 policy to that group
+that allows for read-only access to the group.
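+
+Usage:
+    python create_org_data_czar_policy.py --org <org-name>
+    python create_org_data_czar_policy.py --file <path-to-file-of-org-names>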
+
+"""
+
+import argparse
+import boto3
+from botocore.exceptions import ClientError
+from string import Template
+import sys
+
+template = Template("""{
+    "Version":"2012-10-17",
+    "Statement": [
+        {
+            "Sid": "AllowListingOfOrgFolder",
+            "Action": ["s3:ListBucket"],
+            "Effect": "Allow",
+            "Resource": ["arn:aws:s3:::edx-course-data"],
+            "Condition":{"StringLike":{"s3:prefix":["$org","$org/*"]}}
+        },
+        {
+            "Sid": "AllowGetBucketLocation",
+            "Action": ["s3:GetBucketLocation"],
+            "Effect": "Allow",
+            "Resource": ["arn:aws:s3:::edx-course-data"]
+        },
+        {
+            "Sid": "AllowGetS3ActionInOrgFolder",
+            "Effect": "Allow",
+            "Action": ["s3:GetObject"],
+            "Resource": ["arn:aws:s3:::edx-course-data/$org/*"]
+        }
+    ]
+}""")
+
+
+def add_org_group(org, iam_connection):
+    group_name = "edx-course-data-{org}".format(org=org)
+
+    try:
+        iam_connection.create_group(GroupName=group_name)
+    except ClientError as bse:
+        if bse.response['ResponseMetadata']['HTTPStatusCode'] == 409:
+            pass
+        else:
+            print(bse)
+
+    try:
+        iam_connection.put_group_policy(
+            GroupName=group_name,
+            PolicyName=group_name,
+            PolicyDocument=template.substitute(org=org)
+        )
+    except ClientError as bse:
+        if bse.response['ResponseMetadata']['HTTPStatusCode'] == 409:
+            pass
+        else:
+            print(bse)
+            print(template.substitute(org=org))
+
+
+parser = argparse.ArgumentParser()
+group = parser.add_mutually_exclusive_group()
+group.add_argument('-o', '--org', help='Name of the org for which to create an IAM '
+                                       'role and policy, this should have the same '
+                                       'name as the S3 bucket')
+group.add_argument('-f', '--file', help='The path to a file containing one org name '
+                                        'per line.')
+
+args = parser.parse_args()
+
+iam_connection = boto3.client('iam')
+if args.org:
+    add_org_group(args.org.rstrip('\n').lower(), iam_connection)
+elif args.file:
+    with open(args.file) as file:
+        for line in file:
+            org = line.rstrip('\n').lower()
+            add_org_group(org, iam_connection)
+else:
+    parser.print_usage()
+    sys.exit(1)
+
+sys.exit(0)
diff --git a/util/create_data_czar/remove_data_czar.py b/util/create_data_czar/remove_data_czar.py new file mode 100644 index 00000000000..7ddc5589a10 --- /dev/null +++ b/util/create_data_czar/remove_data_czar.py @@ -0,0 +1,66 @@ +import logging
+import sys
+import argparse
+import boto3.session
+import botocore.exceptions
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+handler = logging.StreamHandler(sys.stdout)
+handler.setLevel(logging.INFO)
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+# Assumes that the Data Czars already have your public key
+# Assumes that boto3 is configured with the edX Prod account
+
+def delete_iam_user(session: boto3.session.Session, user_name: str) -> None:
+    """For a given boto3.session.Session, delete the IAM User and all assoc'd resources."""
+    iam = session.resource("iam")
+    iam_client = session.client("iam")
+    user = iam.User(user_name)
+    try:
+        user.load()
+    except botocore.exceptions.ClientError as client_error:
+        # If load failed with NoSuchEntity, IAM User doesn't exist.
+        if client_error.response.get("Error", {}).get("Code", "") == "NoSuchEntity":
+            logger.error(f"User {user_name} does not exist")
+            return
+        raise client_error
+    logger.debug(f"Deleting IAM User: {user.arn}")
+    for group in user.groups.all():
+        logger.debug(f"Removing {user.arn} from Group {group.arn}")
+        user.remove_group(GroupName=group.name)
+    try:
+        login_profile = iam.LoginProfile(user.name)
+        login_profile.load()
+        logger.debug(f"Deleting Login Profile (I.E. Password) from {user.arn}")
+        login_profile.delete()
+    except botocore.exceptions.ClientError as client_error:
+        # If load failed with NoSuchEntity, No Login Profile
+        if client_error.response.get("Error", {}).get("Code", "") != "NoSuchEntity":
+            raise client_error
+    for access_key in user.access_keys.all():
+        logger.debug(f"Deleting Access Key from {user.arn}: {access_key.access_key_id}")
+        access_key.delete()
+    for policy in user.policies.all():
+        logger.debug(f"Deleting Inline Policy from {user.arn}: {policy.name}")
+        policy.delete()
+    for policy in user.attached_policies.all():
+        logger.debug(f"Detaching Managed Policy from {user.arn}: {policy.arn}")
+        user.detach_policy(PolicyArn=policy.arn)
+    # Deleting IAM User
+    user.delete()
+    logger.info(f"Deleted IAM user: {user.name}")
+
+
+if __name__ == "__main__":
+    # Parser
+    parser = argparse.ArgumentParser(description="Username of Data Czar.")
+    parser.add_argument('-u', '--user', help='Email of Data Czar', required=True)
+    args = parser.parse_args()
+
+    # Create boto3 session and delete user
+    user_name = args.user
+    session = boto3.session.Session()
+    delete_iam_user(session, user_name)
diff --git a/util/csmh-extended/migrate-same-database-instance.sh b/util/csmh-extended/migrate-same-database-instance.sh new file mode 100644 index 00000000000..d0bf6744628 --- /dev/null +++ b/util/csmh-extended/migrate-same-database-instance.sh @@ -0,0 +1,19 @@ +MINID=0
+MAXID=1003426362
+STEP=10000
+MIGRATE_USER=migrate
+PASSWORD='secret-password'
+HOST='my-database-instance'
+
+
+for ((i=0; i<=$MAXID; i+=$STEP)); do
+echo -n "$i";
+mysql -u $MIGRATE_USER -p$PASSWORD -h $HOST wwc < "$SOURCE_SERVER"
+    index => "$SOURCE_INDEX" #content for forums
+    scroll => "12h" #must be as long as the run takes to complete
+    scan => true #scan through all indexes efficiently
+    docinfo => true #necessary to move document_type and document_id over
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "$TARGET_SERVER"
+    index => "$TARGET_INDEX" #same as above
+    manage_template => false
+    document_type => "%{[@metadata][_type]}"
+    document_id => "%{[@metadata][_id]}"
+  }
+  stdout {
+    codec => "dots" #Print a dot when stuff gets moved so we know it's working
+  }
+}
+
+filter {
+  mutate {
+    remove_field => ["@timestamp", "@version"] #these fields get added by logstash for some reason
+  }
+}
+EOF
+
+logstash -w "$WORKERS" -e "$filter"
diff --git a/util/elasticsearch/forums-incremental-reindex.sh b/util/elasticsearch/forums-incremental-reindex.sh new file mode 100755 index 00000000000..6bb94bc5469 --- /dev/null +++ b/util/elasticsearch/forums-incremental-reindex.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash
+set -euo pipefail
+
+#
+# Thin wrapper around rake search:catchup for cs_comment_service (forums).
+#
+# Reindexes documents created since WINDOW ago.
+# If SLEEP_TIME is set to any number greater than 0, loops indefinitely. Since re-
+# indexing can only yield correct results, the only risk of setting WINDOW too large
+# is poor performance.
+#
+# Usage:
+#     source ../forum_env; ./forums-incremental-reindex.sh INDEX [WINDOW] [SLEEP_TIME] [BATCH_SIZE]
+#
+# Args:
+#     INDEX       The index to re-index
+#     WINDOW      Number of minutes ago to re-index from
+#     SLEEP_TIME  Number of seconds to sleep between re-indexing
+#     BATCH_SIZE  Number of documents to index per batch
+#
+# Example:
+#     ./forums-incremental-reindex.sh content 30
+#
+
+INDEX="$1"
+WINDOW="${2:-5}"
+SLEEP_TIME="${3:-60}"
+BATCH_SIZE="${4:-500}"
+
+if [ "$SLEEP_TIME" -ge "$((WINDOW * 60))" ]; then
+    echo 'ERROR: SLEEP_TIME must not be longer than WINDOW, or else documents may be missed.'
+    exit 1
+fi
+
+while : ; do
+    echo "reindexing documents newer than $WINDOW minutes..."
+    rake search:catchup["$WINDOW","$INDEX","$BATCH_SIZE"]
+    echo "done. Sleeping $SLEEP_TIME seconds..."
+    sleep "$SLEEP_TIME"
+
+    [ "$SLEEP_TIME" -le 0 ] && break
+done
diff --git a/util/elasticsearch/requirements.txt b/util/elasticsearch/requirements.txt new file mode 100644 index 00000000000..41dbdaf1fe3 --- /dev/null +++ b/util/elasticsearch/requirements.txt @@ -0,0 +1,14 @@ +#
+# This file is autogenerated by pip-compile with Python 3.8
+# by the following command:
+#
+#    make upgrade
+#
+deepdiff==3.1.0
+    # via -r requirements/elasticsearch.in
+elasticsearch==0.4.5
+    # via -r requirements/elasticsearch.in
+jsonpickle==3.0.2
+    # via deepdiff
+urllib3==1.26.18
+    # via elasticsearch
diff --git a/util/elasticsearch/verify-index-copy.py b/util/elasticsearch/verify-index-copy.py new file mode 100755 index 00000000000..74aeffe5684 --- /dev/null +++ b/util/elasticsearch/verify-index-copy.py @@ -0,0 +1,345 @@ +"""
+Verifies that an index was correctly copied from one ES host to another.
+"""
+
+import itertools
+import pprint
+import random
+
+from deepdiff import DeepDiff
+from elasticsearch import Elasticsearch
+from elasticsearch.helpers import scan
+from argparse import ArgumentParser
+
+
+description = """
+Compare two Elasticsearch indices
+"""
+
+SCAN_ITER_STEP = 50
+SCAN_MATCH_THRESHOLD = .9
+
+RANDOM_CHECK_SIZE = 10
+RANDOM_CHECKS_BEFORE_RESET = 100
+
+def parse_args():
+    """
+    Parse the arguments for the script.
+    """
+    parser = ArgumentParser(description=description)
+
+    parser.add_argument(
+        '-o', '--old', dest='old', required=True, nargs=2,
+        help='Hostname and index of old ES host, e.g. https://localhost:9200 content'
+    )
+    parser.add_argument(
+        '-n', '--new', dest='new', required=True, nargs=2,
+        help='Hostname and index of new ES host, e.g. https://localhost:9200 content'
+    )
+    parser.add_argument(
+        '-s', '--scan', dest='scan', action="http://github.com/store_true",
+        help='Run a full scan comparison instead of a random selection.'
+    )
+    parser.add_argument(
+        '-c', '--check-percentage', dest='check_percentage', type=float, default=.1,
+        help='Percentage of randomly found docs to check between old and new indices (default: .1)'
+    )
+
+    return parser.parse_args()
+
+
+def grouper(iterable, n):
+    """
+    Collect data into fixed-length chunks or blocks,
+    from the itertools recipe list: https://docs.python.org/3/library/itertools.html#recipes
+    """
+    # grouper('ABCDEFG', 3) --> ABC DEF Gxx
+    args = [iter(iterable)] * n
+    return itertools.zip_longest(*args)
+
+
+def docs_match(old_doc, new_doc):
+    """
+    Return True if the docs match, minus the ignorable fields
+
+    Args:
+        old_doc: a dict of an elasticsearch doc from the old cluster
+        new_doc: a dict of an elasticsearch doc from the new cluster
+    """
+    """
+    example doc:
+    {'dictionary_item_added': {
+        "root['_source']['_id']",
+        "root['_source']['abuse_flaggers']",
+        "root['_source']['anonymous']",
+        "root['_source']['anonymous_to_peers']",
+        "root['_source']['at_position_list']",
+        "root['_source']['author_username']",
+        "root['_source']['closed']",
+        "root['_source']['comment_count']",
+        "root['_source']['historical_abuse_flaggers']",
+        "root['_source']['pinned']",
+        "root['_source']['thread_type']",
+        "root['_source']['visible']",
+        "root['_source']['votes']",
+        "root['found']"},
+    'dictionary_item_removed': {
+        "root['_source']['id']",
+        "root['_source']['thread_id']",
+        "root['_source']['votes_point']",
+        "root['exists']"},
+    'values_changed': {
+        "root['_index']": {
+            'new_value': u'content_20170324145539907',
+            'old_value': u'content_20151207225034'},
+        "root['_source']['body']": {
+            'new_value': u'encryption neglect hypothesize polluters wining pitiably prophetess apostrophe foretelling assignments diaphragms trustees scroll scruffs shrivels characterizes digraph lasted sharked rewind chamoix charier protoplasm rapports isolated upbraid mortgaged cuddled indefinitely sinful insaner slenderized cemetery deject soundly preventable',
+            'old_value': u'embellishing orbitals complying alternation welching sepulchered grate blench placenta landslide dependance hurdle predicted chaplet earsplitting assess awol necrosis freeways skipper delicatessen sponsorship bellboys antiseptics gabardines admittedly screechier professional roughness educations nutting valences iridescence deductions'},
+        "root['_source']['title']": {
+            'new_value': u'southpaw afterward playgoers roughed requites arrived byplay ninetieth textural rental foreclosing',
+            'old_value': u'guttersnipes corduroys ghostly discourtesies'},
+        "root['_source']['updated_at']": {
+            'new_value': u'2017-03-29T18:51:19Z',
+            'old_value': u'2017-03-28T12:58:02Z'},
+        "root['_version']": {
+            'new_value': 20,
+            'old_value': 1}}}
+    """
+    ignorable_fields = [
+        "root['exists']",
+        "root['found']",
+        "root['_index']",
+        "root['updated_at']",
+        "root['_version']",
+        "root['_score']",
+    ]
+    diff_types = ['dictionary_item_added', 'dictionary_item_removed', 'values_changed']
+    diff_doc = DeepDiff(old_doc, new_doc)
+
+    if 'values_changed' not in diff_doc:
+        diff_doc['values_changed'] = set()
+
+    # if this fails something is horribly wrong
+    if set(diff_doc.keys()) != set(diff_types):
+        print('ERROR: expected to be diffing dictionaries, got something else! id: {}'.format(
+            new_doc['_id']))
+
+    for diff_type in diff_types:
+        for field in ignorable_fields:
+            if diff_type in diff_doc:
+                # values_changed is a set, the other two are dicts
+                if isinstance(diff_doc[diff_type], set):
+                    diff_doc[diff_type].discard(field)
+                else:
+                    diff_doc[diff_type].pop(field, None)
+
+    return all(len(diff_doc[diff_type]) == 0 for diff_type in diff_types)
+
+
+def find_matching_ids(es, index, ids, docs):
+    """
+    Finds out how many of the ids in the given ids are in the given index in the given
+    ES deployment.
+
+    We also compare documents to ensure that those still match, skipping a few fields
+    (see docs_match() for which ones).
+
+    Args:
+        es - Elasticsearch instance corresponding to the cluster we want to check
+        index - name of the index that we want to check
+        ids - a list of dictionaries of the form {'_id': } of the ids we want to check.
+        docs - a dictionary of the form {'': document}, where "document"s are full ES docs
+    """
+    body = {'docs': ids}
+
+    search_result = es.mget(index=index, body=body)
+    matching = 0
+    for elt in search_result['docs']:
+        # Checks whether or not there was a document matching the id at all.
+        # 'exists' is 0.9.x
+        # 'found' is 1.5.x
+        if elt.get('exists', False) or elt.get('found', False):
+            if docs_match(docs[elt['_id']], elt):
+                matching += 1
+            else:
+                print('FAILURE: Documents with id {id} do not match: '.format(
+                    id=elt['_id']
+                ) + repr({'diff': DeepDiff(docs[elt['_id']], elt), 'new': elt, 'old': docs[elt['_id']]}))
+        else:
+            print('ERROR: Document with id {id} missing: {doc}'.format(
+                id=elt['_id'], doc=docs[elt['_id']]
+            ))
+    return matching
+
+
+def scan_documents(old_es, new_es, old_index, new_index):
+    """
+    Scan for matching documents
+
+    In order to match the two indices without having to deal with ordering issues,
+    we pull a set of documents from the old ES index, and then try to find matching
+    documents with the same _id in the new ES index. This process is batched to avoid
+    making individual network calls to the new ES index.
+    """
+    matching = 0
+    total = 0
+    old_iter = scan(old_es, index=old_index)
+    for old_elts in grouper(old_iter, SCAN_ITER_STEP):
+
+        old_elt_ids = []
+        old_elt_docs = {}
+        for elt in old_elts:
+            if elt is not None:
+                old_elt_ids.append({'_id': elt['_id']})
+                old_elt_docs[elt['_id']] = elt
+
+        matching += find_matching_ids(new_es, new_index, old_elt_ids, old_elt_docs)
+        total += len(old_elt_ids)
+        if total % 100 == 0:
+            print(f'processed {total} items')
+
+    ratio = float(matching)/total
+    print("{}: scanned documents matching ({} out of {}, {:.6}%)".format(
+        'OK' if ratio > SCAN_MATCH_THRESHOLD else 'FAILURE', matching, total, ratio * 100
+    ))
+
+
+def random_checks(old_es, new_es, old_index, new_index, total_document_count, check_percentage):
+    """
+    Check random documents
+
+    This runs random spot checks on whether data was moved over correctly;
+    it is much faster than the full scan.
+    """
+    total = 0
+    matching = 0
+    current_offset = -1
+    while float(total) / total_document_count < check_percentage:
+        # We only want to page a certain amount before regenerating a new set of
+        # random documents.
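+        # A fresh random seed reshuffles the function_score ordering; resetting
+        # the offset keeps each page window small for the current shuffle.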
+ if current_offset > RANDOM_CHECKS_BEFORE_RESET or current_offset < 0: + seed = random.randint(0, 1000) + current_offset = 0 + body = { + 'size': RANDOM_CHECK_SIZE, + 'from': current_offset, + 'query': { + 'function_score': { + 'functions': [{ + 'random_score': { + 'seed': seed + } + }] + } + } + } + results = old_es.search( + index=old_index, body=body + ) + ids = [] + docs = {} + for elt in results['hits']['hits']: + ids.append({'_id': elt['_id']}) + docs[elt['_id']] = elt + matching += find_matching_ids(new_es, new_index, ids, docs) + num_elts = len(ids) + total += num_elts + current_offset += num_elts + + if total % 100 == 0: + print(f'processed {total} items') + + ratio = float(matching) / total + print("{}: random documents matching ({} out of {}, {}%)".format( + 'OK' if ratio > SCAN_MATCH_THRESHOLD else 'FAILURE', matching, total, int(ratio * 100) + )) + + +def check_mappings(old_mapping, new_mapping): + """ + Verify that the two mappings match in terms of keys and properties + Args: + - old_mapping (dict) - the mappings from the older ES + - new_mapping(dict) - the mappings from the newer ES + """ + + deep_diff = DeepDiff(old_mapping, new_mapping) + if deep_diff != {}: + print("FAILURE: Index mappings do not match") + pprint.pprint(deep_diff) + else: + print("OK: Index mappings match") + + +def main(): + """ + Run the verification. + """ + args = parse_args() + old_es = Elasticsearch([args.old[0]]) + new_es = Elasticsearch([args.new[0]]) + + old_index = args.old[1] + new_index = args.new[1] + + old_stats = list(old_es.indices.stats(index=old_index)['indices'].values())[0]['primaries'] + new_stats = list(new_es.indices.stats(index=new_index)['indices'].values())[0]['primaries'] + + #compare document count + old_count = old_stats['docs']['count'] + new_count = new_stats['docs']['count'] + + print("{}: Document count ({} = {})".format( + 'OK' if old_count == new_count else 'FAILURE', old_count, new_count + )) + + old_size = old_stats['store']['size_in_bytes'] + new_size = new_stats['store']['size_in_bytes'] + print("{}: Index size ({} = {})".format( + 'OK' if old_count == new_count else 'FAILURE', old_size, new_size + )) + + def get_mappings(es, index): + # for 1.5.x, there is an extra 'mappings' field that holds the mappings. + mappings = list(es.indices.get_mapping(index=index).values())[0] + new_style = mappings.get('mappings', None) + return new_style if new_style is not None else mappings + + # Verify that the mappings match between old and new + old_mapping = get_mappings(old_es, old_index) + new_mapping = get_mappings(new_es, new_index) + + check_mappings(old_mapping, new_mapping) + + if args.scan: + scan_documents(old_es, new_es, old_index, new_index) + else: + random_checks(old_es, new_es, old_index, new_index, new_count, args.check_percentage) + + + +""" +index.stats() +elasticsearch.scroll() +use without scan during downtime +elasticsearch.helpers.scan is an iterator (whew) + +sample first, then full validation + is old subset of new? + is number of edits small? + +no numeric ids +can use random scoring? +{"size": 1, "query": {"function_score": {"functions":[{"random_score": {"seed": 123456}}]}}} +use that with scroll and check some number +can't use scroll with sorting. Maybe just keep changing the seed? + It's kinda slow, but probably fine + get `size` at a time + are random sorts going to get the same docs on both clusters? +Alternative: random score with score cutoff? 
Or script field and search/cutoff + Might also be able to use track_scores with scan&scroll on 1.5 and a score cutoff +""" + +if __name__ == '__main__': + main() diff --git a/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py b/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py new file mode 100644 index 00000000000..0abd69d7b26 --- /dev/null +++ b/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py @@ -0,0 +1,276 @@ +import boto3 +import click +import datetime + +elb_client = None + + +@click.group() +def cli(): + pass + + +def get_client(): + global elb_client + if elb_client is None: + elb_client = boto3.client('elb') + return elb_client + + +def get_policies(): + client = get_client() + response = client.describe_load_balancer_policies() + policy_infos = response['PolicyDescriptions'] + return policy_infos + + +def get_tls_security_policy_template_names(): + policy_infos = get_policies() + policy_template_names = list() + + for policy_info in policy_infos: + if policy_info['PolicyTypeName'] == 'SSLNegotiationPolicyType': + policy_template_names.append(policy_info['PolicyName']) + + return policy_template_names + + +def check_valid_policy(ctx, param, value): + list_of_valid_policy_names = get_tls_security_policy_template_names() + if value not in list_of_valid_policy_names: + raise click.BadParameter("""Could not find the specified policy version, + found versions: {}""" + .format(list_of_valid_policy_names)) + return value + + +def get_elb_infos(): + client = get_client() + response = client.describe_load_balancers( + PageSize=400 + ) + return response['LoadBalancerDescriptions'] + + +def get_elb_names(): + elb_names = list() + for elb_info in get_elb_infos(): + elb_names.append(elb_info['LoadBalancerName']) + return elb_names + + +def print_header(header): + print("\n\n----------------------------------------------") + print(f"[ ] {header}") + print("----------------------------------------------") + + +def print_line_item(line_item): + print(f"[ * ] {line_item}") + + +def print_list(name, items_list): + print_header(name) + for item in items_list: + print_line_item(item) + + +def create_tls_policy(elb_name, policy_version_to_copy): + client = get_client() + policy_attributes = list() + # AWS will copy all the other attributes.
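+ # Setting only the Reference-Security-Policy attribute makes AWS fill in the + # protocol and cipher settings from the named predefined policy template, + # e.g. ELBSecurityPolicy-TLS-1-1-2017-01 (see the examples file).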
+ policy_attributes.append({ + "AttributeName": "Reference-Security-Policy", + "AttributeValue": policy_version_to_copy + }) + milli_datetime = str(int(datetime.datetime.now().strftime("%s")) * 1000) + print('Creating new policy for elb....') + new_policy_name = "SSLUpdateScript-SSLNegotiationPolicy-{}-{}".format( + elb_name, milli_datetime) + response = client.create_load_balancer_policy( + LoadBalancerName=elb_name, + PolicyName=new_policy_name, + PolicyTypeName='SSLNegotiationPolicyType', + PolicyAttributes=policy_attributes + ) + print('Done creating ...') + return new_policy_name + + +def elb_ref_policy(elb_name, policy_names): + ref_policies = list() + client = get_client() + + response = client.describe_load_balancer_policies( + LoadBalancerName=elb_name, + PolicyNames=policy_names + ) + + policies = response['PolicyDescriptions'] + for policy in policies: + if policy['PolicyTypeName'] == 'SSLNegotiationPolicyType': + for attribute in policy['PolicyAttributeDescriptions']: + if attribute['AttributeName'] == 'Reference-Security-Policy': + ref_policies.append(attribute['AttributeValue']) + return ref_policies + + +def get_reference_templates(elb_name): + client = get_client() + listener_descriptions = client.describe_load_balancers( + LoadBalancerNames=[ + elb_name, + ], + )['LoadBalancerDescriptions'][0]['ListenerDescriptions'] + reference_security_policies = list() + for listener_description in listener_descriptions: + if listener_description['Listener']['Protocol'] == 'HTTPS': + policy_names = listener_description['PolicyNames'] + elb_reference_policies = elb_ref_policy(elb_name, policy_names) + reference_security_policies.extend(elb_reference_policies) + return reference_security_policies + + +@click.command() +def show_available_policy_versions(): + list_of_valid_policy_names = get_tls_security_policy_template_names() + print_list('Available Policies: ', list_of_valid_policy_names) + + +@click.command() +def show_elb_policy_versions(): + print('\n Please be patient... this may take a moment...\n\n') + elb_infos = get_elb_infos() + elbs_by_current_policy = {} + for elb_info in elb_infos: + elb_name = elb_info['LoadBalancerName'] + reference_templates = get_reference_templates(elb_name) + for reference_template in reference_templates: + if reference_template not in elbs_by_current_policy: + elbs_by_current_policy[reference_template] = [] + elbs_by_current_policy[reference_template].append(elb_name) + for policy_name in elbs_by_current_policy.keys(): + print_list(policy_name, elbs_by_current_policy[policy_name]) + print('\n\n') + + +@click.command() +@click.option('--policy_version', callback=check_valid_policy, + help='The TLS Policy version you would like to set', + required=True) +@click.option('--names', + required=False, + help=""" + Comma-separated ELB names, e.g.: + 'elb-name-app1,elb-name-app2'. 
+ This field is case sensitive.""") +@click.option('--port_override', + required=False, + default=None, + help=""" + Force the TLS updater to only pay attention to a specific port. + By default it will find the correct port and do the right thing; + this only matters if you have multiple TLS listeners on different + ports""") +@click.option('--confirm', default=False, required=False, is_flag=True, + help='Set this when you actually want to do the update.') +def update_elb_policies(confirm, policy_version, names, port_override): + elb_names = get_elb_names() + elb_names_to_update = [] + + if names is not None: + names = names.replace(' ', '').split(',') + for name in names: + if name in elb_names: + elb_names_to_update.append(name) + else: + raise Exception('You must specify names...') + + elb_names_to_update = set(elb_names_to_update) + + if confirm is False: + print('\n\nIf I actually ran the update this would be the result:\n') + print_list(policy_version, elb_names_to_update) + print('\nAppend --confirm to actually perform the update\n') + else: + for elb_name in elb_names_to_update: + tls_policy_name = create_tls_policy(elb_name, policy_version) + print(f"Trying to update...{elb_name}") + client = get_client() + + # Determine which policies are actually active + # on the ELB's TLS listener, since AWS's policies + # endpoint returns every policy that has ever + # been attached to the ELB + elbs = client.describe_load_balancers( + LoadBalancerNames=[ + elb_name, + ], + )['LoadBalancerDescriptions'] + + load_balancer_descriptions = list() + for elb in elbs: + if elb['LoadBalancerName'] == elb_name: + load_balancer_descriptions.append(elb) + + load_balancer_description = load_balancer_descriptions[0] + + listeners = load_balancer_description['ListenerDescriptions'] + + active_policy_names = list() + tls_port = None + for listener in listeners: + if (port_override is not None and listener['Listener']['LoadBalancerPort'] == int(port_override)) or (port_override is None and listener['Listener']['Protocol'] == 'HTTPS'): + tls_port = listener['Listener']['LoadBalancerPort'] + active_policy_names.extend(listener['PolicyNames']) + break + + if tls_port is None and port_override is not None: + print("""Skipped updating this ELB because it does not have a listener + on the specified override port\n""") + continue + + # Now remove the active TLS-related policy from that list; + # this requires querying a different endpoint, because the + # endpoint below does not indicate which policies are + # currently active: + policies = client.describe_load_balancer_policies( + LoadBalancerName=elb_name + )['PolicyDescriptions'] + + # Make a new list containing the new TLS policy, + # and any previously active policies that are not TLS policies + + non_tls_policies = list() + + for policy in policies: + if policy['PolicyTypeName'] != 'SSLNegotiationPolicyType': + non_tls_policies.append(policy) + + non_tls_policy_names = list() + for non_tls_policy in non_tls_policies: + non_tls_policy_names.append(non_tls_policy['PolicyName']) + + non_tls_policies_on_listener = list() + + for policy_name in active_policy_names: + if policy_name in non_tls_policy_names: + non_tls_policies_on_listener.append(policy_name) + + policy_names = non_tls_policies_on_listener + [tls_policy_name] + response = client.set_load_balancer_policies_of_listener( + LoadBalancerName=elb_name, + LoadBalancerPort=tls_port, + PolicyNames=policy_names + ) + print(f"Updated {elb_name}\n") + 
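+# Example invocations (mirrored in the examples file in this directory); +# the update is a dry run unless --confirm is appended: +# python elb_tls_policy_management_util.py show_available_policy_versions +# python elb_tls_policy_management_util.py update_elb_policies \ +# --policy_version ELBSecurityPolicy-TLS-1-1-2017-01 --names 'elb-name-app1' --confirm + 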
+cli.add_command(show_available_policy_versions) +cli.add_command(show_elb_policy_versions) +cli.add_command(update_elb_policies) + +if __name__ == '__main__': + cli() diff --git a/util/elb_tls_policy_management_util/examples b/util/elb_tls_policy_management_util/examples new file mode 100644 index 00000000000..205560c6ed4 --- /dev/null +++ b/util/elb_tls_policy_management_util/examples @@ -0,0 +1,20 @@ +# +# Print help +python elb_tls_policy_management_util.py --help + +### List available policy versions +python elb_tls_policy_management_util.py show_available_policy_versions + +### List current policy versions for all ELBs in the account +python elb_tls_policy_management_util.py show_elb_policy_versions + +### Update ELBs by name +python elb_tls_policy_management_util.py update_elb_policies --policy_version ELBSecurityPolicy-TLS-1-1-2017-01 --names 'elb-name-app1,elb-name-app2' + +### Handle multiple listeners and non-standard TLS port listeners +The default behaviour here will find the first HTTPS listener and set its policy to the policy you specify while preserving any additional policies attached to that listener. + +ELBs with multiple TLS listeners will be listed more than once in show_elb_policy_versions; to deal with these you will have to explicitly set the port like so: +python elb_tls_policy_management_util.py update_elb_policies --policy_version ELBSecurityPolicy-TLS-1-1-2017-01 --names 'elb-name-app1,elb-name-app2' --port_override + +You don't need to retarget when using the port override, as it will skip ELBs that don't have a listener on that port. \ No newline at end of file diff --git a/util/elb_tls_policy_management_util/requirements.txt b/util/elb_tls_policy_management_util/requirements.txt new file mode 100644 index 00000000000..a8ad9008830 --- /dev/null +++ b/util/elb_tls_policy_management_util/requirements.txt @@ -0,0 +1,8 @@ +boto3==1.4.8 +botocore==1.8.2 +click==6.7 +docutils==0.14 +jmespath==0.9.3 +python-dateutil==2.6.1 +s3transfer==0.1.11 +six==1.11.0 diff --git a/util/helm_values_to_rst_table_util/README.md b/util/helm_values_to_rst_table_util/README.md new file mode 100644 index 00000000000..bbc1c7a999b --- /dev/null +++ b/util/helm_values_to_rst_table_util/README.md @@ -0,0 +1,60 @@ + +Converts a Helm chart's values.yaml file into an RST table.
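+ +Keys are flattened into dot-separated paths (e.g. `app.image.tag`); keys belonging to a subchart passed via `--subcharts` are skipped unless they end in `.enabled` (as implemented in `collect_keys_from_yaml`), and each description is emitted as a `TODO` placeholder to fill in by hand.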
+ +Example: + + + python helm_values_to_rst_table_util.py --values ../../../edx-notes-api/helmcharts/notes/values.yaml --subcharts mysql --subcharts elasticsearch + =================================================================================================== =================================================================================================== =================================================================================================== + Parameter Description Default + =================================================================================================== =================================================================================================== =================================================================================================== + app.replicaCount TODO 1 + app.image.repository TODO edxops/notes + app.image.tag TODO latest + app.image.pullPolicy TODO IfNotPresent + app.imagePullSecrets TODO [] + app.nameOverride TODO + app.fullnameOverride TODO + app.service.type TODO ClusterIP + app.service.port TODO 80 + app.ingress.enabled TODO False + app.ingress.hosts TODO [{'host': 'notes.local', 'paths': []}] + app.ingress.tls TODO [] + app.tolerations TODO [] + app.extraInitContainers TODO [] + app.config.ALLOWED_HOSTS TODO ['*'] + app.config.CLIENT_ID TODO + app.config.CLIENT_SECRET TODO + app.config.DATABASES.default.ENGINE TODO django.db.backends.mysql + app.config.DATABASES.default.HOST TODO notes-mysql + app.config.DATABASES.default.NAME TODO notes-db + app.config.DATABASES.default.OPTIONS.connect_timeout TODO 10 + app.config.DATABASES.default.PASSWORD TODO + app.config.DATABASES.default.PORT TODO 3306 + app.config.DATABASES.default.USER TODO notes-db-user + app.config.DISABLE_TOKEN_CHECK TODO False + app.config.ELASTICSEARCH_INDEX TODO edx_notes + app.config.ELASTICSEARCH_URL TODO http://notes-elasticsearch-client:9200 + app.config.HAYSTACK_CONNECTIONS.default.ENGINE TODO notesserver.highlight.ElasticsearchSearchEngine + app.config.HAYSTACK_CONNECTIONS.default.INDEX_NAME TODO notes + app.config.HAYSTACK_CONNECTIONS.default.URL TODO http://notes-elasticsearch-client:9200/ + app.config.JWT_AUTH.JWT_AUTH_COOKIE_HEADER_PAYLOAD TODO stage-edx-jwt-cookie-header-payload + app.config.JWT_AUTH.JWT_AUTH_COOKIE_SIGNATURE TODO stage-edx-jwt-cookie-signature + app.config.JWT_AUTH.JWT_ISSUERS TODO [] + app.config.JWT_AUTH.JWT_PUBLIC_SIGNING_JWK_SET TODO + app.config.RESULTS_DEFAULT_SIZE TODO 25 + app.config.RESULTS_MAX_SIZE TODO 250 + app.config.SECRET_KEY TODO + app.config.USERNAME_REPLACEMENT_WORKER TODO username_replacement_service_worker + app.config.LOG_SETTINGS_LOG_DIR TODO /var/tmp + app.config.LOG_SETTINGS_LOGGING_ENV TODO no_env + app.config.LOG_SETTINGS_DEV_ENV TODO True + app.config.LOG_SETTINGS_DEBUG TODO True + app.config.LOG_SETTINGS_LOCAL_LOGLEVEL TODO INFO + app.config.LOG_SETTINGS_EDX_FILENAME TODO edx.log + app.config.LOG_SETTINGS_SERVICE_VARIANT TODO edx-notes-api + elasticsearch.enabled TODO True + mysql.enabled TODO True + migrations.enabled TODO True + migrations.migrationContainerName TODO notes-migrations + =================================================================================================== =================================================================================================== =================================================================================================== \ No newline at end of file diff --git a/util/helm_values_to_rst_table_util/helm_values_to_rst_table_util.py 
b/util/helm_values_to_rst_table_util/helm_values_to_rst_table_util.py new file mode 100644 index 00000000000..5e9f167c7b9 --- /dev/null +++ b/util/helm_values_to_rst_table_util/helm_values_to_rst_table_util.py @@ -0,0 +1,91 @@ +import click +import yaml + +@click.command() +@click.option('--values', help='Path to a values.yaml file', required=True) +@click.option('--subcharts', help='Sub chart values to ignore', multiple=True) +def cli(values, subcharts): + with open(values) as stream: + parsed_dict = yaml.safe_load(stream) + keys_from_yaml = collect_keys_from_yaml(parsed_dict, subcharts) + col_width = 99 + print_header(col_width) + for dot_format_key in keys_from_yaml: + value = extract_default_using_dot_key(dot_format_key, parsed_dict) + print_row(dot_format_key, value, col_width) + print_bar(col_width) + +def collect_keys_from_yaml(parsed_dict, subcharts): + aggregate = [] + outp = get_keys("", parsed_dict) + for entry in outp: + first_part_of_key = entry.split(".")[0] + + if first_part_of_key not in subcharts or entry.endswith(".enabled"): + aggregate.append(entry) + return aggregate + +def print_bar(col_width): + p1 = int(col_width) * "=" + p2 = int(col_width) * "=" + p3 = int(col_width) * "=" + print(f"{p1} {p2} {p3}") + +def print_header(col_width): + word1 = "Parameter" + num_spaces1 = col_width - len(word1) + num_spaces1 = num_spaces1 + 1 + spaces1 = " " * num_spaces1 + + word2 = "Description" + num_spaces2 = col_width - len(word2) + num_spaces2 = num_spaces2 + 1 + spaces2 = " " * num_spaces2 + + word3 = "Default" + num_spaces3 = col_width - len(word3) + spaces3 = " " * num_spaces3 + + print_bar(col_width) + print(f"{word1}{spaces1} {word2}{spaces2} {word3}{spaces3}") + print_bar(col_width) + +def print_row(dot_format_key, value, col_width): + space1 = (" " * (col_width - len(dot_format_key))) + space2 = (" " * (col_width - len(dot_format_key))) + space3 = " " * (len(dot_format_key) - 2) + print(f"{dot_format_key}{space1} TODO{space2}{space3}{value}") + +def get_keys(prefix, inp): + if isinstance(inp, dict): + aggregate = [] + for child_key in inp.keys(): + child = inp[child_key] + + if prefix != "": + modified_prefix = prefix + "." + else: + modified_prefix = prefix + + if isinstance(child, dict): + aggregate.append(get_keys(modified_prefix + child_key, child)) + else: + aggregate.append(modified_prefix + child_key) + return flatten(aggregate) + +def extract_default_using_dot_key(dot_format_key, parsed_dict): + key_parts = dot_format_key.split(".") + result = parsed_dict + for key_part in key_parts: + result = result[key_part] + return result + +def flatten(target): + if target == []: + return target + if isinstance(target[0], list): + return flatten(target[0]) + flatten(target[1:]) + return target[:1] + flatten(target[1:]) + +if __name__ == '__main__': + cli() \ No newline at end of file diff --git a/util/helm_values_to_rst_table_util/requirements3.txt b/util/helm_values_to_rst_table_util/requirements3.txt new file mode 100644 index 00000000000..28655b36eb1 --- /dev/null +++ b/util/helm_values_to_rst_table_util/requirements3.txt @@ -0,0 +1,2 @@ +Click==7.0 +PyYAML==5.4.1 diff --git a/util/install/ansible-bootstrap.sh b/util/install/ansible-bootstrap.sh new file mode 100755 index 00000000000..c1e17464acc --- /dev/null +++ b/util/install/ansible-bootstrap.sh @@ -0,0 +1,190 @@ +#!/usr/bin/env bash + +# +# Script for installing Ansible and the edX configuration repository +# onto a host to enable running ansible to complete configuration.
+# This script can be used by Docker, Packer or any other system +# for building images that require having ansible available. +# +# Can be run as follows: +# +# UPGRADE_OS=true CONFIGURATION_VERSION="master" \ +# bash <(curl -s https://raw.githubusercontent.com/edx/configuration/master/util/install/ansible-bootstrap.sh) + +set -xe + +if [[ -z "${CONFIGURATION_REPO}" ]]; then + CONFIGURATION_REPO="https://github.com/openedx/configuration.git" +fi + +if [[ -z "${CONFIGURATION_VERSION}" ]]; then + CONFIGURATION_VERSION=${OPENEDX_RELEASE-master} +fi + +if [[ -z "${UPGRADE_OS}" ]]; then + UPGRADE_OS=false +fi + +if [[ -z "${RUN_ANSIBLE}" ]]; then + RUN_ANSIBLE=true +fi + +# +# Bootstrapping constants +# +VIRTUAL_ENV_VERSION="16.7.10" +PIP_VERSION="21.2.1" +SETUPTOOLS_VERSION="44.1.0" +VIRTUAL_ENV="/tmp/bootstrap" +PYTHON_BIN="${VIRTUAL_ENV}/bin" +ANSIBLE_DIR="/tmp/ansible" +CONFIGURATION_DIR="/tmp/configuration" +EDX_PPA_KEY_SERVER="keyserver.ubuntu.com" +EDX_PPA_KEY_ID="B41E5E3969464050" + +cat << EOF +****************************************************************************** + +Running the edx_ansible bootstrap script with the following arguments: + +CONFIGURATION_REPO="${CONFIGURATION_REPO}" +CONFIGURATION_VERSION="${CONFIGURATION_VERSION}" + +****************************************************************************** +EOF + + +if [[ $(id -u) -ne 0 ]] ;then + echo "Please run as root"; + exit 1; +fi + +if grep -q 'Trusty Tahr' /etc/os-release +then + SHORT_DIST="trusty" +elif grep -q 'Xenial Xerus' /etc/os-release +then + SHORT_DIST="xenial" +elif grep -q 'Bionic Beaver' /etc/os-release +then + SHORT_DIST="bionic" +elif grep -q 'Focal Fossa' /etc/os-release +then + SHORT_DIST="focal" +else + cat << EOF + + This script is only known to work on Ubuntu Trusty, Xenial, Bionic, and + Focal; exiting. If you are interested in helping make installation possible + on other platforms, let us know. + +EOF + exit 1; +fi + +if [[ "${SHORT_DIST}" == focal ]] ;then + PYTHON_VERSION="3.8" +else + PYTHON_VERSION="3.5" +fi + +EDX_PPA="deb http://ppa.edx.org ${SHORT_DIST} main" + +# Upgrade the OS +rm -r /var/lib/apt/lists/* -vf +apt-get update -y + +# gnupg is needed to run apt-key update on bionic. +if [[ "${SHORT_DIST}" == bionic ]] ;then + apt-get install -y gnupg +fi + +apt-key update -y + +if [ "${UPGRADE_OS}" = true ]; then + echo "Upgrading the OS..." + apt-get upgrade -y +fi + +# Required for add-apt-repository +apt-get install -y software-properties-common +if [[ "${SHORT_DIST}" != trusty ]] && [[ "${SHORT_DIST}" != xenial ]] && [[ "${SHORT_DIST}" != bionic ]] && [[ "${SHORT_DIST}" != focal ]] ;then + apt-get install -y python-software-properties +fi + +# Add git PPA +add-apt-repository -y ppa:git-core/ppa + +# For older software we need to install our own PPA +# Phased out with Ubuntu 18.04 Bionic and Ubuntu 20.04 Focal +if [[ "${SHORT_DIST}" != bionic ]] && [[ "${SHORT_DIST}" != focal ]] ;then + apt-key adv --keyserver "${EDX_PPA_KEY_SERVER}" --recv-keys "${EDX_PPA_KEY_ID}" + add-apt-repository -y "${EDX_PPA}" +fi + +# Add deadsnakes repository for python3.5 usage in +# Ubuntu versions other than Xenial.
+if [[ "${SHORT_DIST}" != xenial ]] ;then + add-apt-repository -y ppa:deadsnakes/ppa +fi + +# Install python 2.7 latest, git and other common requirements +# NOTE: This will install the latest version of python 2.7 and +# which may differ from what is pinned in virtualenvironments +apt-get update -y + +if [[ "${SHORT_DIST}" != focal ]] ;then + apt-get install -y python2.7 python2.7-dev python-pip python-apt python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev +else + apt-get install -y python3-pip python3-apt python3-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev +fi + +apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python3-pip python3-apt + +# We want to link pip to pip3 for Ubuntu versions that don't have python 2.7 so older scripts work there +# Applies to Ubuntu 20.04 Focal +if [[ "${SHORT_DIST}" != trusty ]] && [[ "${SHORT_DIST}" != xenial ]] && [[ "${SHORT_DIST}" != bionic ]] ;then + sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.8 1 + sudo update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 +fi + +python${PYTHON_VERSION} -m pip install --upgrade pip=="${PIP_VERSION}" + +# pip moves to /usr/local/bin when upgraded +PATH=/usr/local/bin:${PATH} +python${PYTHON_VERSION} -m pip install setuptools=="${SETUPTOOLS_VERSION}" +python${PYTHON_VERSION} -m pip install virtualenv=="${VIRTUAL_ENV_VERSION}" + +if [[ "true" == "${RUN_ANSIBLE}" ]]; then + # create a new virtual env + /usr/local/bin/virtualenv --python=python${PYTHON_VERSION} "${VIRTUAL_ENV}" + + PATH="${PYTHON_BIN}":${PATH} + + # Install the configuration repository to install + # edx_ansible role + git clone ${CONFIGURATION_REPO} ${CONFIGURATION_DIR} + cd ${CONFIGURATION_DIR} + git checkout ${CONFIGURATION_VERSION} + make requirements + + cd "${CONFIGURATION_DIR}"/playbooks + "${PYTHON_BIN}"/ansible-playbook edx_ansible.yml -i '127.0.0.1,' -c local -e "CONFIGURATION_VERSION=${CONFIGURATION_VERSION}" + + # cleanup + rm -rf "${ANSIBLE_DIR}" + rm -rf "${CONFIGURATION_DIR}" + rm -rf "${VIRTUAL_ENV}" + rm -rf "${HOME}/.ansible" + + cat << EOF + ****************************************************************************** + + Done bootstrapping, edx_ansible is now installed in /edx/app/edx_ansible. + + ****************************************************************************** +EOF +else + mkdir -p /edx/ansible/facts.d + echo '{ "ansible_bootstrap_run": true }' > /edx/ansible/facts.d/ansible_bootstrap.json +fi diff --git a/util/install/generate-passwords.sh b/util/install/generate-passwords.sh new file mode 100755 index 00000000000..73f4f36dc12 --- /dev/null +++ b/util/install/generate-passwords.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Read a list of Ansible variables that should have generated values, and make +# a new file just like it, with the generated values. + +TARGET=${CONFIGURATION_VERSION-${OPENEDX_RELEASE-master}} +wget -q "https://raw.githubusercontent.com/edx/configuration/$TARGET/playbooks/sample_vars/passwords.yml" -O passwords-template.yml + +while IFS= read -r line; do + # Make a random string. SECRET_KEY's should be longer. + length=35 + if [[ $line == *SECRET_KEY* ]]; then + length=100 + fi + REPLACE=$(LC_ALL=C < /dev/urandom tr -dc 'A-Za-z0-9' | head -c$length) + # Change "!!null"-to-end-of-line to the password. 
+ echo "$line" | sed "s/\!\!null.*/\'$REPLACE\'/" +done < passwords-template.yml > my-passwords.yml diff --git a/util/install/native.sh b/util/install/native.sh new file mode 100644 index 00000000000..d8432b062d9 --- /dev/null +++ b/util/install/native.sh @@ -0,0 +1,189 @@ +#!/bin/bash +## +## Installs the pre-requisites for running Open edX on a single Ubuntu 16.04 +## instance. This script is provided as a convenience and any of these +## steps could be executed manually. +## +## Note that this script requires that you have the ability to run +## commands as root via sudo. Caveat Emptor! +## + +## +## Sanity checks +## + +if [[ ! $OPENEDX_RELEASE ]]; then + echo "You must define OPENEDX_RELEASE" + exit +fi + +if [[ `lsb_release -rs` != "20.04" ]]; then + echo "This script is only known to work on Ubuntu 20.04, exiting..." + exit +fi + +# Config.yml is required, must define LMS and CMS names, and the names +# must not infringe trademarks. + +if [[ ! -f config.yml ]]; then + echo 'You must create a config.yml file specifying the hostnames (and if' + echo 'needed, ports) of your LMS and Studio hosts.' + echo 'For example:' + echo ' EDXAPP_LMS_BASE: "11.22.33.44"' + echo ' EDXAPP_CMS_BASE: "11.22.33.44:18010"' + exit +fi + +grep -Fq EDXAPP_LMS_BASE config.yml +GREP_LMS=$? + +grep -Fq EDXAPP_CMS_BASE config.yml +GREP_CMS=$? + +if [[ $GREP_LMS == 1 ]] || [[ $GREP_CMS == 1 ]]; then + echo 'Your config.yml file must specify the hostnames (and if' + echo 'needed, ports) of your LMS and Studio hosts.' + echo 'For example:' + echo ' EDXAPP_LMS_BASE: "11.22.33.44"' + echo ' EDXAPP_CMS_BASE: "11.22.33.44:18010"' + exit +fi + +grep -Fq edx. config.yml +GREP_BAD_DOMAIN=$? + +if [[ $GREP_BAD_DOMAIN == 0 ]]; then + echo '*** NOTE: Open edX and edX are registered trademarks.' + echo 'You may not use "openedx." or "edx." as subdomains when naming your site.' + echo 'For more details, see the edX Trademark Policy: https://edx.org/trademarks' + echo '' + echo 'Here are some examples of unacceptable domain names:' + echo ' openedx.yourdomain.org' + echo ' edx.yourdomain.org' + echo ' openedxyourdomain.org' + echo ' yourdomain-edx.com' + echo '' + echo 'Please choose different domain names.' + exit +fi + +## +## Log what's happening +## + +mkdir -p logs +log_file=$(realpath logs/install-$(date +%Y%m%d-%H%M%S).log) +exec > >(tee $log_file) 2>&1 +echo "Capturing output to $log_file" +echo "Installation started at $(date '+%Y-%m-%d %H:%M:%S')" + +function finish { + echo "Installation finished at $(date '+%Y-%m-%d %H:%M:%S')" +} +trap finish EXIT + +echo "Installing release '$OPENEDX_RELEASE'" + +## +## Set ppa repository source for gcc/g++ 4.8 in order to install insights properly +## +sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test + +## +## Update and Upgrade apt packages +## +sudo apt-get update -y +sudo apt-get upgrade -y + +## +## Install system pre-requisites +## +sudo apt-get install -y build-essential software-properties-common curl git-core libxml2-dev libxslt1-dev python3-pip libmysqlclient-dev python3-apt python3-dev libxmlsec1-dev libfreetype6-dev swig gcc g++ +# ansible-bootstrap installs yaml that pip 19 can't uninstall. +sudo apt-get remove -y python-yaml +sudo pip3 install --upgrade pip==21.2.1 +sudo pip3 install --upgrade setuptools==44.1.0 +sudo -H pip3 install --upgrade virtualenv==20.2.0 + +## +## Overridable version variables in the playbooks. Each can be overridden +## individually, or with $OPENEDX_RELEASE. 
+## +VERSION_VARS=( + EDX_PLATFORM_VERSION + FORUM_VERSION + XQUEUE_VERSION + CONFIGURATION_VERSION + DEMO_VERSION + INSIGHTS_VERSION + ANALYTICS_API_VERSION + ECOMMERCE_VERSION + ECOMMERCE_WORKER_VERSION + DISCOVERY_VERSION + THEMES_VERSION + MFE_DEPLOY_VERSION + RETIREMENT_SERVICE_VERSION +) + +for var in ${VERSION_VARS[@]}; do + # Each variable can be overridden by a similarly-named environment variable, + # or OPENEDX_RELEASE, if provided. + ENV_VAR=$(echo $var | tr '[:lower:]' '[:upper:]') + eval override=\${$ENV_VAR-\$OPENEDX_RELEASE} + if [ -n "$override" ]; then + EXTRA_VARS="-e $var=$override $EXTRA_VARS" + fi +done + +# my-passwords.yml is the file made by generate-passwords.sh. +if [[ -f my-passwords.yml ]]; then + EXTRA_VARS="-e@$(pwd)/my-passwords.yml $EXTRA_VARS" +fi + +EXTRA_VARS="-e@$(pwd)/config.yml $EXTRA_VARS" + +CONFIGURATION_VERSION=${CONFIGURATION_VERSION-$OPENEDX_RELEASE} + +## +## Clone the configuration repository and run Ansible +## +cd /var/tmp +git clone https://github.com/openedx/configuration +cd configuration +git checkout $CONFIGURATION_VERSION +git pull + +## +## Install the ansible requirements +## +cd /var/tmp/configuration +sudo -H pip3 install -r requirements.txt + +## +## Run the openedx_native.yml playbook in the configuration/playbooks directory +## +cd /var/tmp/configuration/playbooks && sudo -E ansible-playbook -c local ./openedx_native.yml -i "localhost," $EXTRA_VARS "$@" +ansible_status=$? + +if [[ $ansible_status -ne 0 ]]; then + echo " " + echo "============================================================" + echo "Ansible failed!" + echo "------------------------------------------------------------" + echo " " + echo "Decoded error:" + # Find the last "failed" or "fatal" line and decode it. + awk '/^(failed|fatal):/{bad=$0} END {if (bad) print bad}' $log_file | python3 /var/tmp/configuration/util/ansible_msg.py + echo " " + echo "============================================================" + echo "Installation failed!" + echo "------------------------------------------------------------" + echo "If you need help, see https://open.edx.org/getting-help ." + echo "When asking for help, please provide as much information as you can." + echo "These might be helpful:" + echo " Your log file is at $log_file" + echo " Your environment:" + env | egrep -i 'version|release' | sed -e 's/^/ /' + echo "============================================================" + exit ${ansible_status} +fi diff --git a/util/install/sandbox.sh b/util/install/sandbox.sh new file mode 100755 index 00000000000..fe81ec15932 --- /dev/null +++ b/util/install/sandbox.sh @@ -0,0 +1,11 @@ +#!/bin/bash +## +## This was the old name for native.sh. If someone runs it, tell them +## where the file went. +## + +echo +echo "The sandbox.sh script has been renamed to native.sh" +echo "Let the author of your instructions know, or send us an email at oscm@edx.org" +echo "Re-run your command with native.sh instead of sandbox.sh" +echo diff --git a/util/install/vagrant.sh b/util/install/vagrant.sh deleted file mode 100644 index f5eb371f2bd..00000000000 --- a/util/install/vagrant.sh +++ /dev/null @@ -1,42 +0,0 @@ -## -## Installs the pre-requisites for running edX on a single Ubuntu 12.04 -## instance. This script is provided as a convenience and any of these -## steps could be executed manually. -## -## Note that this script requires that you have the ability to run -## commands as root via sudo. Caveat Emptor! -## - -## -## Sanity check -## -if [[ ! 
"$(lsb_release -d | cut -f2)" =~ $'Ubuntu 12.04' ]]; then - echo "This script is only known to work on Ubuntu 12.04, exiting..."; - exit; -fi - -## -## Install system pre-requisites -## -sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev -wget https://bitbucket.org/pypa/setuptools/raw/0.8/ez_setup.py -O - | sudo python -sudo pip install --upgrade pip -sudo pip install --upgrade virtualenv - -## -## Clone the configuration repository and run Ansible -## -cd /var/tmp -git clone https://github.com/edx/configuration - -## -## Install the ansible requirements -## -cd /var/tmp/configuration -sudo pip install -r requirements.txt - -## -## Run the edx_sandbox.yml playbook in the configuration/playbooks directory -## -cd /var/tmp/configuration/playbooks -sudo ansible-playbook -c local ./edx_sandbox.yml -i "localhost," diff --git a/util/jenkins/add_new_xqueues_to_dashboard/__init__.py b/util/jenkins/add_new_xqueues_to_dashboard/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py b/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py new file mode 100644 index 00000000000..fef21151bbf --- /dev/null +++ b/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py @@ -0,0 +1,127 @@ +import pprint +import re + +import boto3 +import botocore +import backoff +import click +import json + +MAX_TRIES = 1 + +class CwBotoWrapper: + def __init__(self): + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def list_metrics(self, *args, **kwargs): + return self.client.list_metrics(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def put_dashboard(self, *args, **kwargs): + return self.client.put_dashboard(*args, **kwargs) + + +def generate_dashboard_widget_metrics( + cloudwatch, + namespace, + metric_name, + dimension_name, + properties={}, + right_axis_items=[] +): + pp = pprint.PrettyPrinter(indent=4) + + metrics = cloudwatch.list_metrics( + Namespace=namespace, MetricName=metric_name, Dimensions=[{"Name": dimension_name}] + ) + + values = [] + + for metric in metrics['Metrics']: + for dimension in metric['Dimensions']: + if dimension['Name'] == dimension_name: + values.append(dimension['Value']) + + values.sort() + + new_widget_metrics = [] + for value in values: + value_properties = properties.copy() + value_properties['label'] = value + if value in right_axis_items: + value_properties["yAxis"] = "right" + new_widget_metrics.append([namespace, metric_name, dimension_name, value, value_properties]) + + return new_widget_metrics + + +# * means that all arguments after cloudwatch are keyword arguments only and are not positional +def generate_dashboard_widget( + cloudwatch, + *, + x=0, + y, + title, + namespace, + metric_name, + dimension_name, + metrics_properties={}, + height, + width=24, + stacked=False, + region='us-east-1', + period=60, + right_axis_items=[] +): + return {'type': 'metric', 'height': height, 'width': width, 'x': x, 'y': y, + 'properties': { + 'period': period, 'view': 'timeSeries', 'stacked': stacked, 'region': region, + 'title': f"{title} (auto-generated)", + 'metrics': generate_dashboard_widget_metrics(cloudwatch, namespace, metric_name, dimension_name, + metrics_properties, 
right_axis_items=right_axis_items) + } + } + + +@click.command() +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. edx or stage)") +def generate_dashboard(environment, deploy): + pp = pprint.PrettyPrinter(indent=4) + cloudwatch = CwBotoWrapper() + + dashboard_name = f"{environment}-{deploy}-xqueues" + xqueue_namespace = f"xqueue/{environment}-{deploy}" + + widgets = [] + y_cord = 0 + height = 9 + + if deploy == 'edx' and environment == 'prod': + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title=f"{environment}-{deploy} Xqueue Queues", + namespace=xqueue_namespace, metric_name="queue_length", + dimension_name="queue", + ) + ) + + dashboard_body = {'widgets': widgets} + + print("Dashboard Body") + pp.pprint(dashboard_body) + + cloudwatch.put_dashboard(DashboardName=dashboard_name, + DashboardBody=json.dumps(dashboard_body)) + + +if __name__ == '__main__': + generate_dashboard() diff --git a/util/jenkins/add_new_xqueues_to_dashboard/requirements.txt b/util/jenkins/add_new_xqueues_to_dashboard/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/add_new_xqueues_to_dashboard/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/ansible-provision.sh b/util/jenkins/ansible-provision.sh index cbe8f0db25a..8a2a23634e6 100644 --- a/util/jenkins/ansible-provision.sh +++ b/util/jenkins/ansible-provision.sh @@ -3,7 +3,7 @@ # Ansible provisioning wrapper script that # assumes the following parameters set # as environment variables -# +# # - github_username # - server_type # - instance_type @@ -17,25 +17,96 @@ # - dns_name # - environment # - name_tag +set -x + +# Seeing the environment is fine, spewing secrets to the log isn't ok +env | grep -v AWS | grep -v ARN export PYTHONUNBUFFERED=1 export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto -if [[ -n $WORKSPACE ]]; then - # setup a virtualenv in jenkins - if [[ ! -d ".venv" ]]; then - virtualenv .venv - fi - source .venv/bin/activate - pip install -r requirements.txt +# docker on OS-X includes your Mac's home directory in the socket path +# that SSH/Ansible uses for the control socket, pushing you over +# the 108 character limit. +if [ -f /.dockerenv ]; then + export ANSIBLE_SSH_CONTROL_PATH=/tmp/%%C +fi + +run_ansible() { + if [[ "$VERBOSE" == "true" ]]; then + verbose_arg='-vvv' + else + verbose_arg='' + fi + + ansible-playbook $verbose_arg $@ + ret=$? 
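+ # Abort the whole provisioning run as soon as any playbook fails.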
+ if [[ $ret -ne 0 ]]; then + exit $ret + fi +} + +# Install yq +wget https://github.com/mikefarah/yq/releases/download/v4.27.5/yq_linux_amd64 -O $WORKSPACE/yq && chmod +x $WORKSPACE/yq + +function provision_fluentd() { + echo "#!/usr/bin/env bash" + echo "set -ex" + + # add tracking log file to host instance + echo "touch /var/tmp/tracking_logs.log" + echo "chown www-data:www-data /var/tmp/tracking_logs.log" + + echo "docker pull fluent/fluentd:edge-debian" + + # create fluentd config + echo "fluentd_config=/var/tmp/fluentd.conf" + echo "cat << 'EOF' > \$fluentd_config +<source> + @type tail + path /var/tmp/tracking_logs.log + pos_file /var/tmp/tracking_logs.pos + rotate_wait 10 + tag * + <parse> + @type none + </parse> +</source> + +<match *> + @type stdout +</match> +EOF" + echo "docker run -d --name fluentd --network host -v /var/tmp/fluentd.conf:/fluentd/etc/fluentd.conf -v /var/tmp:/var/tmp fluent/fluentd:edge-debian -c /fluentd/etc/fluentd.conf" +} + +# This DATE_TIME will be used as instance launch time tag +if [[ ! -n ${sandbox_life//[0-9]/} ]] && [[ ${sandbox_life} -le 30 ]]; then + TERMINATION_DATE_TIME=`date +"%m-%d-%Y %T" --date "${sandbox_life=7} days"` +else + echo "Please enter a valid value for sandbox_life (between 1 and 30)" + exit 1 +fi + + +if [[ -z $BUILD_USER ]]; then + BUILD_USER=jenkins +fi + +if [[ -z $BUILD_USER_ID ]]; then + BUILD_USER_ID=edx-sandbox fi if [[ -z $WORKSPACE ]]; then dir=$(dirname $0) source "$dir/ascii-convert.sh" + source "$dir/app-container-provisioner.sh" + source "$dir/demo-course-provisioner.sh" else - source "$WORKSPACE/util/jenkins/ascii-convert.sh" + source "$WORKSPACE/configuration/util/jenkins/ascii-convert.sh" + source "$WORKSPACE/configuration/util/jenkins/app-container-provisioner.sh" + source "$WORKSPACE/configuration/util/jenkins/demo-course-provisioner.sh" fi if [[ -z $static_url_base ]]; then @@ -46,19 +117,34 @@ if [[ -z $github_username ]]; then github_username=$BUILD_USER_ID fi -if [[ ! -f $BOTO_CONFIG ]]; then +# Having access keys OR a boto config allows sandboxes to be built. +if [[ ( -z $AWS_ACCESS_KEY_ID || -z $AWS_SECRET_ACCESS_KEY ) && (! -f $BOTO_CONFIG) ]]; then echo "AWS credentials not found for $aws_account" exit 1 fi -extra_vars="/var/tmp/extra-vars-$$.yml" +extra_vars_file="/var/tmp/extra-vars-$$.yml" +sandbox_internal_vars_file="${WORKSPACE}/configuration-internal/ansible/vars/developer-sandbox.yml" +extra_var_arg="-e@${extra_vars_file}" +program_console="false" + +if [[ $edx_internal == "true" ]]; then + # if this is an edx server, include + # the secret var file + extra_var_arg="-e@${sandbox_internal_vars_file} -e@${extra_vars_file} -e DECRYPT_CONFIG_PRIVATE_KEY_PATH=$WORKSPACE -e DECRYPT_CONFIG_PRIVATE_KEY=$WORKSPACE/private.key -e ENCRYPTED_CFG_DIR=$WORKSPACE/configuration-internal/sandbox-remote-config/sandbox -e UNENCRYPTED_CFG_DIR=$WORKSPACE" +fi if [[ -z $region ]]; then region="us-east-1" fi +# edX has reservations for sandboxes in this zone, don't change without updating reservations. 
if [[ -z $zone ]]; then - zone="us-east-1b" + zone="us-east-1c" +fi + +if [[ -z $vpc_subnet_id ]]; then + vpc_subnet_id="subnet-cd867aba" fi if [[ -z $elb ]]; then @@ -66,154 +152,857 @@ fi if [[ -z $dns_name ]]; then - dns_name=$github_username + dns_name=${github_username} fi if [[ -z $name_tag ]]; then name_tag=${github_username}-${environment} fi +if [[ -z $sandbox_platform_name ]]; then + sandbox_platform_name=$dns_name +fi + if [[ -z $ami ]]; then if [[ $server_type == "full_edx_installation" ]]; then - ami="ami-bd6b6ed4" - elif [[ $server_type == "ubuntu_12.04" ]]; then - ami="ami-a73264ce" + ami="ami-0644020c3c81d30ba" + elif [[ $server_type == "ubuntu_18.04" ]]; then + ami="ami-07ebfd5b3428b6f4d" + elif [[ $server_type == "ubuntu_20.04" || $server_type == "full_edx_installation_from_scratch" ]]; then + ami="ami-089b5711e63812c2a" + # Ansible will always use Python3 interpreter on Ubuntu 20.04 hosts to execute modules + extra_var_arg+=' -e ansible_python_interpreter=auto' fi fi if [[ -z $instance_type ]]; then - instance_type="m1.medium" + instance_type="r5.large" +fi + +if [[ -z $instance_initiated_shutdown_behavior ]]; then + instance_initiated_shutdown_behavior="terminate" +fi + +if [[ -z $enable_newrelic ]]; then + enable_newrelic="false" +fi + +if [[ -z $enable_datadog ]]; then + enable_datadog="false" +fi + +if [[ -z $performance_course ]]; then + performance_course="false" +fi + +if [[ -z $edx_demo_course ]]; then + edx_demo_course="false" +fi + +if [[ -z $enable_automatic_auth_for_testing ]]; then + enable_automatic_auth_for_testing="false" +fi + +if [[ -z $enable_client_profiling ]]; then + enable_client_profiling="false" +fi + +if [[ -z $registrar ]]; then + registrar="false" +fi + +if [[ -z $registrar_version ]]; then + REGISTRAR_VERSION="master" +fi + +if [[ -z $license_manager ]]; then + license_manager="false" +fi + +if [[ -z $license_manager_version ]]; then + LICENSE_MANAGER_VERSION="master" +fi + +if [[ -z $commerce_coordinator ]]; then + commerce_coordinator="false" +fi + +if [[ -z $commerce_coordinator_version ]]; then + COMMERCE_COORDINATOR_VERSION="master" +fi + +if [[ -z $enterprise_catalog_version ]]; then + ENTERPRISE_CATALOG_VERSION="master" +fi + +if [[ -z $learner_portal ]]; then + learner_portal="false" +fi + +if [[ -z $learner_portal_version ]]; then + LEARNER_PORTAL_VERSION="master" +fi + +if [[ -z $prospectus ]]; then + prospectus="false" +fi + +if [[ -z $prospectus_version ]]; then + PROSPECTUS_VERSION="master" +fi + +if [[ -z $prospectus_contentful_environment ]]; then + prospectus_contentful_environment="master" +fi + +if [[ $registrar == 'true' ]]; then + program_console="true" +fi + +if [[ -z $authn ]]; then + authn="false" +fi + +if [[ -z $authn_version ]]; then + AUTHN_MFE_VERSION="master" +fi + +if [[ -z $payment ]]; then + payment="false" +fi + +if [[ -z $payment_version ]]; then + PAYMENT_MFE_VERSION="master" +fi + +if [[ -z $learning ]]; then + learning="false" +fi + +if [[ -z $learning_version ]]; then + LEARNING_MFE_VERSION="master" +fi + +if [[ -z $ora_grading ]]; then + ora_grading="false" +fi + +if [[ -z $ora_grading_version ]]; then + ORA_GRADING_MFE_VERSION="master" +fi + +if [[ -z $course_authoring ]]; then + course_authoring="false" +fi + +if [[ -z $course_authoring_version ]]; then + COURSE_AUTHORING_MFE_VERSION="master" fi +if [[ -z $library_authoring ]]; then + library_authoring="false" +fi + +if [[ -z $library_authoring_version ]]; then + LIBRARY_AUTHORING_MFE_VERSION="master" +fi + +if [[ -z $profile ]]; 
then + profile="false" +fi + +if [[ -z $profile_version ]]; then + PROFILE_MFE_VERSION="master" +fi + +if [[ -z $learner_dashboard ]]; then + learner_dashboard="false" +fi + +if [[ -z $learner_dashboard_version ]]; then + LEARNER_DASHBOARD_MFE_VERSION="master" +fi + +# Lowercase the dns name to deal with an ansible bug +dns_name="${dns_name,,}" + deploy_host="${dns_name}.${dns_zone}" ssh-keygen -f "/var/lib/jenkins/.ssh/known_hosts" -R "$deploy_host" -cd playbooks/edx-east - -cat << EOF > $extra_vars ---- -enable_datadog: False -enable_splunkforwarder: False -enable_newrelic: False -ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem -NGINX_ENABLE_SSL: True -NGINX_SSL_CERTIFICATE: '/var/lib/jenkins/star.sandbox.edx.org.crt' -NGINX_SSL_KEY: '/var/lib/jenkins/star.sandbox.edx.org.key' -EDXAPP_LMS_SSL_NGINX_PORT: 443 -EDXAPP_CMS_SSL_NGINX_PORT: 443 -EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host} -EDXAPP_LMS_BASE: ${deploy_host} -EDXAPP_CMS_BASE: studio.${deploy_host} +cd playbooks + +cat << EOF > $extra_vars_file +EDX_PLATFORM_VERSION: $edxapp_version +FORUM_VERSION: $forum_version +XQUEUE_VERSION: $xqueue_version +CONFIGURATION_VERSION: $configuration_version +DEMO_VERSION: $demo_version +THEMES_VERSION: $themes_version +REGISTRAR_VERSION: $registrar_version +LEARNER_PORTAL_VERSION: $learner_portal_version +PROGRAM_CONSOLE_VERSION: $program_console_version +PROSPECTUS_VERSION: $prospectus_version + +edx_ansible_source_repo: ${configuration_source_repo} +edx_platform_repo: ${edx_platform_repo} + +EDXAPP_PLATFORM_NAME: $sandbox_platform_name +SANDBOX_CONFIG: True +CONFIGURE_JWTS: True + +EDXAPP_STATIC_URL_BASE: $static_url_base EDXAPP_LMS_NGINX_PORT: 80 -EDXAPP_LMS_PREVIEW_NGINX_PORT: 80 EDXAPP_CMS_NGINX_PORT: 80 -EDXAPP_SITE_NAME: ${deploy_host} -XSERVER_GRADER_DIR: "/edx/var/xserver/data/content-mit-600x~2012_Fall" -XSERVER_GRADER_SOURCE: "git@github.com:/MITx/6.00x.git" -XSERVER_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull -CERTS_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull -CERTS_AWS_KEY: $(cat /var/lib/jenkins/certs-aws-key) -CERTS_AWS_ID: $(cat /var/lib/jenkins/certs-aws-id) -CERTS_BUCKET: "verify-test.edx.org" + +ECOMMERCE_NGINX_PORT: 80 +ECOMMERCE_SSL_NGINX_PORT: 443 +ECOMMERCE_VERSION: $ecommerce_version + +CREDENTIALS_NGINX_PORT: 80 +CREDENTIALS_SSL_NGINX_PORT: 443 +CREDENTIALS_VERSION: $credentials_version + +ANALYTICS_API_NGINX_PORT: 80 +ANALYTICS_API_SSL_NGINX_PORT: 443 +ANALYTICS_API_VERSION: $analytics_api_version + +REGISTRAR_NGINX_PORT: 80 +REGISTRAR_SSL_NGINX_PORT: 443 +REGISTRAR_VERSION: $registrar_version +REGISTRAR_ENABLED: $registrar + +LEARNER_PORTAL_NGINX_PORT: 80 +LEARNER_PORTAL_SSL_NGINX_PORT: 443 +LEARNER_PORTAL_VERSION: $learner_portal_version +LEARNER_PORTAL_ENABLED: $learner_portal +LEARNER_PORTAL_SANDBOX_BUILD: True + +PROGRAM_CONSOLE_NGINX_PORT: 80 +PROGRAM_CONSOLE_SSL_NGINX_PORT: 443 +PROGRAM_CONSOLE_VERSION: $program_console_version +PROGRAM_CONSOLE_ENABLED: $program_console +PROGRAM_CONSOLE_SANDBOX_BUILD: True + +PROSPECTUS_NGINX_PORT: 80 +PROSPECTUS_SSL_NGINX_PORT: 443 +PROSPECTUS_VERSION: $prospectus_version +PROSPECTUS_ENABLED: $prospectus +PROSPECTUS_CONTENTFUL_ENVIRONMENT: $prospectus_contentful_environment +PROSPECTUS_SANDBOX_BUILD: True + +AUTHN_NGINX_PORT: 80 +AUTHN_SSL_NGINX_PORT: 443 +AUTHN_MFE_VERSION: $authn_version +AUTHN_ENABLED: $authn +AUTHN_SANDBOX_BUILD: True + +PAYMENT_NGINX_PORT: 80 +PAYMENT_SSL_NGINX_PORT: 443 +PAYMENT_MFE_VERSION: $payment_version +PAYMENT_MFE_ENABLED: $payment 
+PAYMENT_SANDBOX_BUILD: True + +LICENSE_MANAGER_NGINX_PORT: 80 +LICENSE_MANAGER_SSL_NGINX_PORT: 443 +LICENSE_MANAGER_VERSION: $license_manager_version +LICENSE_MANAGER_ENABLED: $license_manager +LICENSE_MANAGER_DECRYPT_CONFIG_ENABLED: true +LICENSE_MANAGER_COPY_CONFIG_ENABLED: true + +COMMERCE_COORDINATOR_NGINX_PORT: 80 +COMMERCE_COORDINATOR_SSL_NGINX_PORT: 443 +COMMERCE_COORDINATOR_VERSION: $commerce_coordinator_version +COMMERCE_COORDINATOR_ENABLED: $commerce_coordinator +COMMERCE_COORDINATOR_DECRYPT_CONFIG_ENABLED: true +COMMERCE_COORDINATOR_COPY_CONFIG_ENABLED: true + +EDX_EXAMS_NGINX_PORT: 80 +EDX_EXAMS_SSL_NGINX_PORT: 443 +EDX_EXAMS_DEFAULT_DB_NAME: 'edx_exams' +EDX_EXAMS_MYSQL_USER: 'edx_exams001' +EDX_EXAMS_MYSQL_PASSWORD: 'password' +edx_exams_service_name: 'edx_exams' +EDX_EXAMS_URL_ROOT: https://edx-exams-${deploy_host} +EDX_EXAMS_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'edx_exams-sso-key' +EDX_EXAMS_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'edx_exams-sso-secret' +EDX_EXAMS_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'edx_exams-backend-service-key' +EDX_EXAMS_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'edx_exams-backend-service-secret' +EDX_EXAMS_LOGOUT_URL: '{{ EDX_EXAMS_URL_ROOT }}/logout/' +EDX_EXAMS_SERVICE_USER_EMAIL: 'edx_exams_worker@example.com' +EDX_EXAMS_SERVICE_USER_NAME: 'edx_exams_worker' + +SUBSCRIPTIONS_DEFAULT_DB_NAME: 'subscriptions' +SUBSCRIPTIONS_MYSQL_USER: 'subscriptions001' +SUBSCRIPTIONS_MYSQL_PASSWORD: 'password' + +ENTERPRISE_CATALOG_NGINX_PORT: 80 +ENTERPRISE_CATALOG_SSL_NGINX_PORT: 443 +ENTERPRISE_CATALOG_VERSION: $enterprise_catalog_version +ENTERPRISE_CATALOG_ENABLED: $enterprise_catalog +ENTERPRISE_CATALOG_DECRYPT_CONFIG_ENABLED: true +ENTERPRISE_CATALOG_COPY_CONFIG_ENABLED: true + +DISCOVERY_NGINX_PORT: 80 +DISCOVERY_SSL_NGINX_PORT: 443 +DISCOVERY_VERSION: $discovery_version +NGINX_SET_X_FORWARDED_HEADERS: True +NGINX_REDIRECT_TO_HTTPS: True +EDX_ANSIBLE_DUMP_VARS: true migrate_db: "yes" -openid_workaround: True -edx_platform_version: $edxapp_version -forum_version: $forum_version -xqueue_version: $xqueue_version -xserver_version: $xserver_version -ora_version: $ora_version -ease_version: $ease_version -certs_version: $certs_version -discern_version: $discern_version - -rabbitmq_ip: "127.0.0.1" -rabbitmq_refresh: True -COMMON_HOSTNAME: edx-server +dns_name: $dns_name +COMMON_HOSTNAME: $dns_name +COMMON_DEPLOY_HOSTNAME: ${deploy_host} COMMON_DEPLOYMENT: edx COMMON_ENVIRONMENT: sandbox -EDXAPP_STATIC_URL_BASE: $static_url_base - -# Settings for Grade downloads -EDXAPP_GRADE_STORAGE_TYPE: 's3' -EDXAPP_GRADE_BUCKET: 'edx-grades' -EDXAPP_GRADE_ROOT_PATH: 'sandbox' - -# send logs to s3 -AWS_S3_LOGS: true -AWS_S3_LOGS_NOTIFY_EMAIL: devops+sandbox-log-sync@edx.org -AWS_S3_LOGS_FROM_EMAIL: devops@edx.org -AWS_DUMP_VARS: true +COMMON_LMS_BASE_URL: https://${deploy_host} +COMMON_ECOMMERCE_BASE_URL: https://ecommerce-${deploy_host} +nginx_default_sites: + - lms + +LEARNING_NGINX_PORT: 80 +LEARNING_SSL_NGINX_PORT: 443 +LEARNING_MFE_VERSION: $learning_version +LEARNING_MFE_ENABLED: $learning +LEARNING_SANDBOX_BUILD: True + +ORA_GRADING_NGINX_PORT: 80 +ORA_GRADING_SSL_NGINX_PORT: 443 +ORA_GRADING_MFE_VERSION: $ora_grading_version +ORA_GRADING_MFE_ENABLED: $ora_grading +ORA_GRADING_SANDBOX_BUILD: True + +COURSE_AUTHORING_NGINX_PORT: 80 +COURSE_AUTHORING_SSL_NGINX_PORT: 443 +COURSE_AUTHORING_MFE_VERSION: $course_authoring_version +COURSE_AUTHORING_MFE_ENABLED: $course_authoring +COURSE_AUTHORING_SANDBOX_BUILD: True + +LIBRARY_AUTHORING_NGINX_PORT: 80 +LIBRARY_AUTHORING_SSL_NGINX_PORT: 443 
+LIBRARY_AUTHORING_MFE_VERSION: $library_authoring_version +LIBRARY_AUTHORING_MFE_ENABLED: $library_authoring +LIBRARY_AUTHORING_SANDBOX_BUILD: True + +PROFILE_NGINX_PORT: 80 +PROFILE_SSL_NGINX_PORT: 443 +PROFILE_MFE_VERSION: $profile_version +PROFILE_MFE_ENABLED: $profile +PROFILE_SANDBOX_BUILD: True + +LEARNER_DASHBOARD_NGINX_PORT: 80 +LEARNER_DASHBOARD_SSL_NGINX_PORT: 443 +LEARNER_DASHBOARD_MFE_VERSION: $learner_dashboard_version +LEARNER_DASHBOARD_MFE_ENABLED: $learner_dashboard +LEARNER_DASHBOARD_SANDBOX_BUILD: True + +mysql_server_version_5_7: True + +edxapp_container_enabled: $edxapp_container_enabled + +# User provided extra vars +$extra_vars EOF if [[ $basic_auth == "true" ]]; then # vars specific to provisioning added to $extra-vars - cat << EOF_AUTH >> $extra_vars -NGINX_HTPASSWD_USER: $auth_user -NGINX_HTPASSWD_PASS: $auth_pass + cat << EOF_AUTH >> $extra_vars_file +COMMON_ENABLE_BASIC_AUTH: True +COMMON_HTPASSWD_USER: $auth_user +COMMON_HTPASSWD_PASS: $auth_pass +XQUEUE_BASIC_AUTH_USER: $auth_user +XQUEUE_BASIC_AUTH_PASSWORD: $auth_pass EOF_AUTH + +else + cat << EOF_AUTH >> $extra_vars_file +COMMON_ENABLE_BASIC_AUTH: False +EOF_AUTH + +fi + +if [[ $mongo_version == "5.0" ]]; then + cat << MONGO_VERSION >> $extra_vars_file +MONGO_5_0_ENABLED: True +MONGO_6_0_ENABLED: False +MONGO_7_0_ENABLED: False +MONGO_VERSION +fi +if [[ $mongo_version == "6.0" ]]; then + cat << MONGO_VERSION >> $extra_vars_file +MONGO_5_0_ENABLED: False +MONGO_6_0_ENABLED: True +MONGO_7_0_ENABLED: False +MONGO_VERSION +fi +if [[ $mongo_version == "7.0" ]]; then + cat << MONGO_VERSION >> $extra_vars_file +MONGO_5_0_ENABLED: False +MONGO_6_0_ENABLED: False +MONGO_7_0_ENABLED: True +MONGO_VERSION +fi + +if [[ -n $nginx_users ]]; then + cat << EOF_AUTH >> $extra_vars_file +NGINX_USERS: $nginx_users +EOF_AUTH +fi + +if [[ $enable_client_profiling == "true" ]]; then + cat << EOF_PROFILING >> $extra_vars_file +EDXAPP_SESSION_SAVE_EVERY_REQUEST: True +EOF_PROFILING +fi + +if [[ $edx_internal == "true" ]]; then + cat << EOF >> $extra_vars_file +EDXAPP_PREVIEW_LMS_BASE: preview-${deploy_host} +EDXAPP_LMS_BASE: ${deploy_host} +EDXAPP_CMS_BASE: studio-${deploy_host} +EDXAPP_CMS_URL_ROOT: "https://{{ EDXAPP_CMS_BASE }}" +EDXAPP_SITE_NAME: ${deploy_host} +edx_internal: True +COMMON_USER_INFO: + - name: ${github_username} + github: true + type: admin +USER_CMD_PROMPT: '[$name_tag] ' +COMMON_ENABLE_NEWRELIC_APP: $enable_newrelic +COMMON_ENABLE_DATADOG: $enable_datadog +COMMON_ENABLE_DATADOG_APP: $enable_datadog +COMMON_OAUTH_BASE_URL: "https://${deploy_host}/" +FORUM_NEW_RELIC_ENABLE: $enable_newrelic +ENABLE_PERFORMANCE_COURSE: $performance_course +ENABLE_EDX_DEMO_COURSE: $edx_demo_course +EDXAPP_ENABLE_AUTO_AUTH: $enable_automatic_auth_for_testing +EDXAPP_NEWRELIC_LMS_APPNAME: sandbox-${dns_name}-edxapp-lms +EDXAPP_NEWRELIC_CMS_APPNAME: sandbox-${dns_name}-edxapp-cms +EDXAPP_NEWRELIC_WORKERS_APPNAME: sandbox-${dns_name}-edxapp-workers +XQUEUE_NEWRELIC_APPNAME: sandbox-${dns_name}-xqueue +XQUEUE_CONSUMER_NEWRELIC_APPNAME: sandbox-${dns_name}-xqueue_consumer +FORUM_NEW_RELIC_APP_NAME: sandbox-${dns_name}-forums +SANDBOX_USERNAME: $github_username +EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT: "https://ecommerce-${deploy_host}/" +EDXAPP_ECOMMERCE_API_URL: "https://ecommerce-${deploy_host}/api/v2" +EDXAPP_DISCOVERY_API_URL: "https://discovery-${deploy_host}/api/v1" +EDXAPP_COURSE_CATALOG_API_URL: "{{ EDXAPP_DISCOVERY_API_URL }}" + +ANALYTICS_API_LMS_BASE_URL: "https://{{ EDXAPP_LMS_BASE }}/" + +# 
NOTE: This is the same as DISCOVERY_URL_ROOT below +ECOMMERCE_DISCOVERY_SERVICE_URL: "https://discovery-${deploy_host}/" +ECOMMERCE_ECOMMERCE_URL_ROOT: "https://ecommerce-${deploy_host}/" +ECOMMERCE_LMS_URL_ROOT: "https://${deploy_host}/" +ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true +ecommerce_create_demo_data: true + +DISCOVERY_URL_ROOT: "https://discovery-${deploy_host}/" +DISCOVERY_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true + +REGISTRAR_URL_ROOT: "https://registrar-${deploy_host}/" +REGISTRAR_API_ROOT: "https://registrar-${deploy_host}/api" +REGISTRAR_DISCOVERY_BASE_URL: "https://discovery-${deploy_host}/" +REGISTRAR_LMS_BASE_URL: "https://${deploy_host}/" +REGISTRAR_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true + +LEARNER_PORTAL_URL_ROOT: "https://learner-portal-${deploy_host}/" +LEARNER_PORTAL_DISCOVERY_BASE_URL: "https://discovery-${deploy_host}/" +LEARNER_PORTAL_LMS_BASE_URL: "https://${deploy_host}/" + +PROGRAM_CONSOLE_URL_ROOT: "https://program-console-${deploy_host}/" +PROGRAM_CONSOLE_DISCOVERY_BASE_URL: "https://discovery-${deploy_host}/" +PROGRAM_CONSOLE_LMS_BASE_URL: "https://${deploy_host}/" +PROGRAM_CONSOLE_REGISTRAR_API_BASE_URL: "https://registrar-${deploy_host}/api" + +PROSPECTUS_URL_ROOT: "https://prospectus-${deploy_host}/" +OAUTH_ID: "{{ PROSPECTUS_OAUTH_ID }}" +OAUTH_SECRET: "{{ PROSPECTUS_OAUTH_SECRET }}" + +AUTHN_URL_ROOT: "https://authn-${deploy_host}/" +PAYMENT_URL_ROOT: "https://payment-${deploy_host}/" +PAYMENT_ECOMMERCE_BASE_URL: "https://ecommerce-${deploy_host}/" +PAYMENT_LMS_BASE_URL: "https://${deploy_host}/" + +credentials_create_demo_data: true +CREDENTIALS_LMS_URL_ROOT: "https://${deploy_host}/" +CREDENTIALS_DOMAIN: "credentials-${deploy_host}" +CREDENTIALS_URL_ROOT: "https://{{ CREDENTIALS_DOMAIN }}" +CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true +CREDENTIALS_DISCOVERY_API_URL: "{{ DISCOVERY_URL_ROOT }}/api/v1/" + +LICENSE_MANAGER_URL_ROOT: "https://license-manager-${deploy_host}/" + +COMMERCE_COORDINATOR_URL_ROOT: "https://commerce-coordinator-${deploy_host}/" + +ENTERPRISE_CATALOG_URL_ROOT: "https://enterprise-catalog-${deploy_host}/" + +EOF fi +encrypted_config_apps=(edxapp ecommerce ecommerce_worker analytics_api discovery credentials registrar edx_notes_api license_manager commerce_coordinator) + +for app in ${encrypted_config_apps[@]}; do + eval app_decrypt_and_copy_config_enabled=\${${app}_decrypt_and_copy_config_enabled} + if [[ ${app_decrypt_and_copy_config_enabled} == "true" ]]; then + cat << EOF >> $extra_vars_file +${app^^}_DECRYPT_CONFIG_ENABLED: true +${app^^}_COPY_CONFIG_ENABLED: true +EOF + fi +done if [[ $recreate == "true" ]]; then # vars specific to provisioning added to $extra-vars - cat << EOF >> $extra_vars + cat << EOF >> $extra_vars_file dns_name: $dns_name keypair: $keypair instance_type: $instance_type security_group: $security_group ami: $ami -region: $region +region: $region zone: $zone -instance_tags: +instance_initiated_shutdown_behavior: $instance_initiated_shutdown_behavior +instance_tags: environment: $environment github_username: $github_username Name: $name_tag source: jenkins owner: $BUILD_USER + instance_termination_time: $TERMINATION_DATE_TIME + datadog: monitored root_ebs_size: $root_ebs_size name_tag: $name_tag -gh_users: - - ${github_username} dns_zone: $dns_zone -rabbitmq_refresh: True -GH_USERS_PROMPT: 
'[$name_tag] '
 elb: $elb
 EOF
+
+    if [[ $server_type == "full_edx_installation" ]]; then
+        extra_var_arg+=' -e instance_userdata="" -e launch_wait_time=0 -e elb_pre_post=false'
+    fi
     # run the tasks to launch an ec2 instance from AMI
-    cat $extra_vars
-    ansible-playbook edx_provision.yml -i inventory.ini -e "@${extra_vars}" --user ubuntu
+    cat $extra_vars_file
+    run_ansible edx_provision.yml -i inventory.ini $extra_var_arg --user ubuntu
     if [[ $server_type == "full_edx_installation" ]]; then
         # additional tasks that need to be run if the
         # entire edx stack is brought up from an AMI
-        ansible-playbook rabbitmq.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
-        ansible-playbook restart_supervisor.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
+        run_ansible redis.yml -i "${deploy_host}," $extra_var_arg --user ubuntu
+        run_ansible restart_supervisor.yml -i "${deploy_host}," $extra_var_arg --user ubuntu
     fi
 fi
+# ansible overrides for master's integration environment setup
+if [[ $masters_integration_environment == "true" ]]; then
+    cat << EOF >> $extra_vars_file
+COMMON_ENABLE_SPLUNKFORWARDER: true
+EDXAPP_ENABLE_ENROLLMENT_RESET: true
+DISCOVERY_POST_MIGRATE_COMMANDS:
+  - command: "./manage.py remove_program_types_from_migrations"
+    when: true
+  - command: >
+      ./manage.py createsuperuser
+      --username="admin"
+      --email="admin@example.com"
+      --no-input
+    when: true
+registrar_post_migrate_commands:
+  - command: >
+      ./manage.py createsuperuser
+      --username="admin"
+      --email="admin@example.com"
+      --no-input
+    when: true
+EOF
+fi
+
 declare -A deploy
-roles="edxapp forum xqueue xserver ora discern certs demo"
-for role in $roles; do
-  deploy[$role]=${!role}
+plays="prospectus edxapp forum ecommerce credentials discovery enterprise_catalog analyticsapi xqueue certs demo testcourses registrar program_console learner_portal"
+
+for play in $plays; do
+  deploy[$play]=${!play}
 done
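+# Note: ${!play} above is bash indirect expansion -- it reads the value of
+# the variable whose name is stored in $play, so a job parameter such as
+# edxapp=true ends up recorded as deploy[edxapp]=true.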
-# If reconfigure was selected or if starting from an ubuntu 12.04 AMI
-# run non-deploy tasks for all roles
-if [[ $reconfigure == "true" || $server_type == "ubuntu_12.04" ]]; then
-    cat $extra_vars
-    ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --skip-tags deploy
+# If reconfigure was selected or if starting from an ubuntu 20.04 AMI
+# run non-deploy tasks for all plays
+if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scratch" || $server_type == "ubuntu_20.04" ]]; then
+    cat $extra_vars_file
+    if [[ $edxapp_container_enabled == "true" ]]; then
+        cat << EOF > $WORKSPACE/edxapp_extra_var.yml
+edxapp_containerized: true
+CAN_GENERATE_NEW_JWT_SIGNATURE: false
+EOF
+        ansible -i "${deploy_host}," $deploy_host -m include_role -a "name=memcache" -u ubuntu -b
+        for playbook in redis $mongo_version; do
+            run_ansible $playbook.yml -i "${deploy_host}," $extra_var_arg --user ubuntu
+        done
+        run_ansible edx_continuous_integration.yml -i "${deploy_host}," $extra_var_arg --user ubuntu --tags "edxlocal"
+        # create fluentd container for processing tracking logs
+        provision_fluentd_script="/var/tmp/provision-fluentd-script.sh"
+        cat << EOF > $provision_fluentd_script
+$(provision_fluentd)
+EOF
+        ansible -c ssh -i "${deploy_host}," $deploy_host -m script -a "${provision_fluentd_script}" -u ubuntu -b
+
+        rm -f "${provision_fluentd_script}"
+
+        # decrypt lms config file
+        asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path $WORKSPACE/configuration-internal/sandbox-remote-config/sandbox/lms.yml --private_key_path $WORKSPACE/private.key --outfile_path $WORKSPACE/lms.yml
+        # decrypt cms config file
+        asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path $WORKSPACE/configuration-internal/sandbox-remote-config/sandbox/studio.yml --private_key_path $WORKSPACE/private.key --outfile_path $WORKSPACE/cms.yml
+
+        sed -i "s/deploy_host/${dns_name}.${dns_zone}/g" $WORKSPACE/lms.yml
+        sed -i "s/deploy_host/${dns_name}.${dns_zone}/g" $WORKSPACE/cms.yml
+
+        # Remove existing private requirements if found
+        if [[ -f "$WORKSPACE/dockerfiles-internal/edx-platform-private/private_requirements.txt" ]] ; then
+            rm -f $WORKSPACE/dockerfiles-internal/edx-platform-private/private_requirements.txt
+        fi
+
+        # Extract private requirements for sandbox
+        readarray app_private_requirements < <(cat $WORKSPACE/configuration/playbooks/roles/edxapp/defaults/main.yml | $WORKSPACE/yq e -o=j -I=0 '.EDXAPP_PRIVATE_REQUIREMENTS[]')
+        for app_private_requirement in "${app_private_requirements[@]}"; do
+            if ! $(echo ${app_private_requirement} | $WORKSPACE/yq '. | has("extra_args")' -) ; then
+                req_name=$(echo "${app_private_requirement}" | $WORKSPACE/yq -e '.name' -)
+                echo -e "${req_name}" >> $WORKSPACE/dockerfiles-internal/edx-platform-private/private_requirements.txt
+            else
+                req_name=$(echo "${app_private_requirement}" | $WORKSPACE/yq -e '.name' -)
+                req_extra_args=$(echo "${app_private_requirement}" | $WORKSPACE/yq -e '.extra_args' -)
+                echo -e "${req_extra_args} ${req_name}" >> $WORKSPACE/dockerfiles-internal/edx-platform-private/private_requirements.txt
+            fi
+        done
+
+        # copy app config file
+        ansible -c ssh -i "${deploy_host}," $deploy_host -m copy -a "src=$WORKSPACE/lms.yml dest=/var/tmp/lms.yml" -u ubuntu -b
+        ansible -c ssh -i "${deploy_host}," $deploy_host -m copy -a "src=$WORKSPACE/cms.yml dest=/var/tmp/cms.yml" -u ubuntu -b
+        # copy private Dockerfile and requirements file
+        ansible -c ssh -i "${deploy_host}," $deploy_host -m copy -a "src=$WORKSPACE/dockerfiles-internal/edx-platform-private dest=/var/tmp/" -u ubuntu -b
+
+        set +x
+
+        app_git_ssh_key=$(aws secretsmanager get-secret-value --secret-id $configuration_secure_secret --query SecretString --output text | jq -r '._local_git_identity')
+
+        # specify variable names
+        app_hostname="courses"
+        app_service_name="lms"
+        app_name="edxapp"
+        app_repo="edx-platform"
+        app_version=$edxapp_version
+        app_gunicorn_port=8000
+        app_cfg=LMS_CFG
+        app_admin_password=SANDBOX_ADMIN_PASSWORD
+
+        app_provision_script="/var/tmp/app-container-provision-script-$$.sh"
+
+        write_app_deployment_script $app_provision_script
+        set -x
+
+        ssh \
+            -o ControlMaster=auto \
+            -o ControlPersist=60s \
+            -o "ControlPath=/tmp/${app_service_name}-ssh-%h-%p-%r" \
+            -o ServerAliveInterval=30 \
+            -o ConnectTimeout=10 \
+            -o StrictHostKeyChecking=no \
+            -o UserKnownHostsFile=/dev/null \
+            ubuntu@${deploy_host} "sudo -n -s bash" <
$app_provision_script + + rm -f "${app_provision_script}" + + # set admin password for demo users + set +x + admin_hashed_password="$($WORKSPACE/yq '.SANDBOX_ADMIN_PASSWORD' $WORKSPACE/configuration-internal/ansible/vars/developer-sandbox.yml)" + + # create demo course and test users + demo_course_provision_script="/var/tmp/demo-provision-script.sh" + write_demo_course_script $demo_course_provision_script + set -x + + ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@${deploy_host} "sudo -n -s bash" < $demo_course_provision_script + + rm -f "${demo_course_provision_script}" + + # edxapp celery workers + # Export LC_* vars. To be passed to remote instance via SSH where SSH configuration allows LC_* to be accepted as environment variables. + # LC_* is normally used for passing through locale settings of SSH clients to SSH servers. + export LC_WORKER_CFG=$(cat < $WORKSPACE/edxapp_extra_var.yml +edxapp_containerized: false +EOF + run_ansible edx_continuous_integration.yml -i "${deploy_host}," $extra_var_arg -e @$WORKSPACE/edxapp_extra_var.yml --user ubuntu + fi fi -# Run deploy tasks for the roles selected -for i in $roles; do - if [[ ${deploy[$i]} == "true" ]]; then - cat $extra_vars - ansible-playbook ${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy - fi -done +if [[ $reconfigure != "true" && $server_type == "full_edx_installation" ]]; then + # Run deploy tasks for the plays selected + for i in $plays; do + if [[ ${deploy[$i]} == "true" ]]; then + cat $extra_vars_file + run_ansible ${i}.yml -i "${deploy_host}," $extra_var_arg --user ubuntu + if [[ ${i} == "edxapp" ]]; then + run_ansible worker.yml -i "${deploy_host}," $extra_var_arg --user ubuntu + fi + fi + done +fi + +# deploy the edx_ansible play +run_ansible edx_ansible.yml -i "${deploy_host}," $extra_var_arg --user ubuntu +cat $sandbox_internal_vars_file $extra_vars_file | grep -v -E "_version|migrate_db" > ${extra_vars_file}_clean +ansible -c ssh -i "${deploy_host}," $deploy_host -m copy -a "src=${extra_vars_file}_clean dest=/edx/app/edx_ansible/server-vars.yml" -u ubuntu -b +ret=$? 
+if [[ $ret -ne 0 ]]; then + exit $ret +fi + +#if [[ $run_oauth == "true" ]]; then +# # Setup the OAuth2 clients +# run_ansible oauth_client_setup.yml -i "${deploy_host}," $extra_var_arg --user ubuntu +#fi + +# set the hostname +run_ansible set_hostname.yml -i "${deploy_host}," -e hostname_fqdn=${deploy_host} --user ubuntu + +# master's integration environment setup +if [[ $masters_integration_environment == "true" ]]; then + # vars specific to master's integration environment + cat << EOF >> $extra_vars_file +username: $registrar_user_email +email: $registrar_user_email +organization_key: $registrar_org_key +registrar_role: "organization_read_write_enrollments" +EOF + run_ansible masters_sandbox.yml -i "${deploy_host}," $extra_var_arg --user ubuntu +fi + +# prospectus sandbox +if [[ $prospectus == "true" ]]; then + run_ansible prospectus_sandbox.yml -i "${deploy_host}," $extra_var_arg --user ubuntu +fi -# deploy the edx_ansible role -ansible-playbook edx_ansible.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu +if [[ $enable_newrelic == "true" ]]; then + run_ansible run_role.yml -i "${deploy_host}," -e role=newrelic_infrastructure $extra_var_arg --user ubuntu +fi + +if [[ $edx_exams == 'true' ]]; then + set +x + + app_git_ssh_key=$(aws secretsmanager get-secret-value --secret-id $configuration_secure_secret --query SecretString --output text | jq -r '._local_git_identity') + + app_hostname="edx-exams" + app_service_name="edx_exams" + app_name="edx-exams" + app_repo="edx-exams" + app_version=$edx_exams_version + app_gunicorn_port=18740 + app_cfg=EDX_EXAMS_CFG + + app_provision_script="/var/tmp/app-container-provision-script-$$.sh" + + write_app_deployment_script $app_provision_script + set -x + + sed -i "s/deploy_host/${dns_name}.${dns_zone}/g" $WORKSPACE/configuration-internal/k8s-sandbox-config/$app_service_name.yml + ansible -c ssh -i "${deploy_host}," $deploy_host -m copy -a "src=${WORKSPACE}/configuration-internal/k8s-sandbox-config/${app_service_name}.yml dest=/var/tmp/${app_service_name}.yml" -u ubuntu -b + ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@${deploy_host} "sudo -n -s bash" < $app_provision_script + rm -f "${app_provision_script}" +fi + +if [[ $subscriptions == 'true' ]]; then + set +x + + app_git_ssh_key=$(aws secretsmanager get-secret-value --secret-id $configuration_secure_secret --query SecretString --output text | jq -r '._local_git_identity') + + app_hostname="subscriptions" + app_service_name="subscriptions" + app_name="subscriptions" + app_repo="subscriptions" + app_version=$subscriptions_version + app_gunicorn_port=18750 + app_cfg=SUBSCRIPTIONS_CFG + app_repo_is_private=true + + app_provision_script="/var/tmp/app-container-provision-script-$$.sh" + + write_app_deployment_script $app_provision_script + set -x + + sed -i "s/deploy_host/${dns_name}.${dns_zone}/g" $WORKSPACE/configuration-internal/k8s-sandbox-config/$app_service_name.yml + ansible -c ssh -i "${deploy_host}," $deploy_host -m copy -a "src=${WORKSPACE}/configuration-internal/k8s-sandbox-config/${app_service_name}.yml dest=/var/tmp/${app_service_name}.yml" -u ubuntu -b + ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@${deploy_host} "sudo -n -s bash" < $app_provision_script + rm -f "${app_provision_script}" +fi -rm -f "$extra_vars" +rm -f "$extra_vars_file" +rm -f ${extra_vars_file}_clean diff --git a/util/jenkins/app-container-provisioner.sh b/util/jenkins/app-container-provisioner.sh new file mode 100644 index 00000000000..1542b360d53 --- 
/dev/null
+++ b/util/jenkins/app-container-provisioner.sh
@@ -0,0 +1,264 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function write_app_deployment_script() {
+    cat <<EOF > "$1"
+#!/usr/bin/env bash
+
+set -ex
+
+# Install yq for yaml processing
+wget https://github.com/mikefarah/yq/releases/download/v4.27.5/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
+
+chown :www-data /var/tmp/${app_service_name}.yml
+
+if [[ ${app_service_name} == 'lms' ]] ; then
+    chown :www-data /var/tmp/cms.yml
+fi
+
+if [[ ${app_service_name} != 'cms' && ${app_service_name} != 'lms' ]] ; then
+    # Create app staticfiles dir
+    mkdir /edx/var/${app_service_name}/staticfiles/ -p && chmod 777 /edx/var/${app_service_name} -R
+fi
+
+# if application is lms, download and setup themes
+if [[ ${app_service_name} == 'lms' && ! -d /edx/var/edx-themes ]] ; then
+    set +x
+    echo -e "${app_git_ssh_key}" > /tmp/theme_ssh_key
+    set -x
+    chmod 0600 /tmp/theme_ssh_key
+    useradd -m -d /edx/var/edx-themes edx-themes -G www-data
+    GIT_SSH_COMMAND="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i /tmp/theme_ssh_key" git clone git@github.com:edx/edx-themes.git /edx/var/edx-themes/edx-themes
+    cd /edx/var/edx-themes/edx-themes && git checkout ${themes_version}
+    chown -R edx-themes:www-data /edx/var/edx-themes
+    sudo -u edx-themes git config --global safe.directory '/edx/var/edx-themes/edx-themes'
+    rm -rf /tmp/theme_ssh_key
+fi
+
+# checkout git repo
+if [ ! -d "/edx/app/${app_name}" ]; then
+    mkdir /edx/app/${app_name}
+fi
+
+if [[ ! -d "/edx/app/${app_name}/${app_repo}" ]] ; then
+
+    # use SSH to clone if repo is private
+    if [[ "$app_repo_is_private" = true ]] ; then
+        set +x
+        echo -e "${app_git_ssh_key}" > /tmp/${app_service_name}_ssh_key
+        set -x
+        chmod 0600 /tmp/${app_service_name}_ssh_key
+        useradd -m -d /edx/var/${app_service_name} ${app_service_name} -G www-data
+        GIT_SSH_COMMAND="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i /tmp/${app_service_name}_ssh_key" git clone git@github.com:edx/${app_repo}.git /edx/app/${app_name}/${app_repo}
+    else
+        git clone https://github.com/edx/${app_repo}.git /edx/app/${app_name}/${app_repo}
+    fi
+    cd /edx/app/${app_name}/${app_repo} && git checkout ${app_version}
+fi
+
+# Generate container image if it doesn't already exist
+if ! $(docker image inspect ${app_image_name} >/dev/null 2>&1 && echo true || echo false) ; then
+    cd /edx/app/${app_name}/${app_repo}
+    export DOCKER_BUILDKIT=1
+    if [[ ${app_service_name} == 'lms' || ${app_service_name} == 'cms' ]]; then
+        docker build . -t ${app_repo}:base --target base
+        cd /var/tmp/edx-platform-private
+        docker build . --build-arg BASE_IMAGE=${app_repo} --build-arg BASE_TAG=base -t ${app_repo}:latest
+    else
+        docker build .
-t ${app_repo}:latest + fi +fi + +# if lms, create image (if it doesn't exist) and generate JWT credentials +if [[ ${app_service_name} == 'lms' ]]; then + touch /tmp/lms_jwt_signature.yml && chmod 777 /tmp/lms_jwt_signature.yml + # generate JWT token, ensure JWT file is mounted as volume + docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/${app_service_name}.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production -e SERVICE_VARIANT=${app_service_name} -e EDX_PLATFORM_SETTINGS=docker-production -v /tmp/lms_jwt_signature.yml:/tmp/lms_jwt_signature.yml -v /var/tmp/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/edx-themes:/edx/var/edx-themes ${app_repo}:latest python3 manage.py lms generate_jwt_signing_key --output-file /tmp/lms_jwt_signature.yml --strip-key-prefix +fi + +# Combine app config with jwt_signature config +cat /var/tmp/${app_service_name}.yml /tmp/lms_jwt_signature.yml > /edx/etc/${app_service_name}.yml + +chown :www-data /edx/etc/${app_service_name}.yml + +if [[ ${app_service_name} == 'lms' || ${app_service_name} == 'cms' ]]; then + # run migrations + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e ${app_cfg}=/edx/etc/${app_service_name}.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production -e SERVICE_VARIANT=${app_service_name} -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py ${app_service_name} showmigrations --database default + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e ${app_cfg}=/edx/etc/${app_service_name}.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production -e SERVICE_VARIANT=${app_service_name} -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py ${app_service_name} migrate --database default --noinput + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e ${app_cfg}=/edx/etc/${app_service_name}.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production -e SERVICE_VARIANT=${app_service_name} -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py ${app_service_name} showmigrations --database student_module_history + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e ${app_cfg}=/edx/etc/${app_service_name}.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production -e SERVICE_VARIANT=${app_service_name} -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py ${app_service_name} migrate --database student_module_history --noinput +else + # Run app migrations + docker run --network=host --rm -u='www-data' -e ${app_cfg}=/edx/etc/${app_service_name}.yml -e 
DJANGO_SETTINGS_MODULE=${app_service_name}.settings.production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/${app_name}:/edx/var/${app_name} -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py migrate + # Generate static assets + docker run --network=host --rm -u='root' -e ${app_cfg}=/edx/etc/${app_service_name}.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.settings.production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /edx/var/${app_service_name}/staticfiles/:/var/tmp/ -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py collectstatic --noinput +fi + +# Setup oauth clients for service other than CMS as part of the LMS setup +if [[ ${app_service_name} == 'lms' ]]; then + service_worker_users=(enterprise veda discovery credentials insights registrar designer license_manager commerce_coordinator enterprise_catalog ecommerce retirement edx_exams subscriptions) + # Provision IDA User in LMS + for service_worker in "\${service_worker_users[@]}"; do + app_hostname=\${service_worker/_/-} + docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock edx-platform:latest python3 manage.py lms manage_user \${service_worker}_worker \${service_worker}_worker@example.com --staff --superuser + + # Create the DOT applications - one for single sign-on and one for backend service IDA-to-IDA authentication. + docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock edx-platform:latest python3 manage.py lms create_dot_application --grant-type authorization-code --skip-authorization --redirect-uris "https://${app_hostname}-${dns_name}.${dns_zone}/complete/edx-oauth2/" --client-id "\${service_worker}-sso-key" --client-secret "\${service_worker}-sso-secret" --scopes 'user_id' \${service_worker}-sso \${service_worker}_worker + docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock edx-platform:latest python3 manage.py lms create_dot_application --grant-type client-credentials --client-id "\${service_worker}-backend-service-key" --client-secret "\${service_worker}-backend-service-secret" \${service_worker}-backend-service \${service_worker}_worker + done +fi + +# oauth client setup +if [[ ${app_service_name} != 'lms' && ${edxapp_container_enabled} == 'true' ]]; then + # Provision IDA User in LMS + docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v 
/edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock edx-platform:latest python3 manage.py lms manage_user $(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}_; fi)${app_service_name}_worker $(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}_; fi)${app_service_name}_worker@example.com --staff --superuser
+
+    # Create the DOT applications - one for single sign-on and one for backend service IDA-to-IDA authentication.
+    docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock edx-platform:latest python3 manage.py lms create_dot_application --grant-type authorization-code --skip-authorization --redirect-uris 'https://${app_hostname}-${dns_name}.${dns_zone}/complete/edx-oauth2/' --client-id '$(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}-; fi)${app_service_name}-sso-key' --client-secret '$(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}-; fi)${app_service_name}-sso-secret' --scopes 'user_id' $(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}-; fi)${app_service_name}-sso $(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}_; fi)${app_service_name}_worker
+    docker run --network=host --rm -u='www-data' -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock edx-platform:latest python3 manage.py lms create_dot_application --grant-type client-credentials --client-id '$(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}-; fi)${app_service_name}-backend-service-key' --client-secret '$(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}-; fi)${app_service_name}-backend-service-secret' $(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}-; fi)${app_service_name}-backend-service $(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}_; fi)${app_service_name}_worker
+fi
+
+# generate lms/cms static assets
+if [[ ${app_service_name} == 'lms' ]]; then
+    # temporary hack, create npm-install.log file
+    touch /edx/app/edxapp/edx-platform/test_root/log/npm-install.log
+    docker run --network=host --rm -u='root' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e LMS_CFG=/edx/etc/${app_service_name}.yml -e CMS_CFG=/edx/etc/cms.yml -e DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production -e SERVICE_VARIANT=${app_service_name} -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml -v /var/tmp/cms.yml:/edx/etc/cms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/${app_name}:/edx/var/${app_name} -v /edx/app/edxapp/edx-platform/test_root/log/npm-install.log:/edx/app/edxapp/edx-platform/test_root/log/npm-install.log -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest paver update_assets --debug-collect --settings=docker-production
+fi
+
+# Generate docker-compose file for app service
+cat <<EOT > /home/$github_username/docker-compose-${app_service_name}.yml
+version: "3.9"
+services:
+  ${app_service_name}:
+    
image: ${app_repo}:latest + stdin_open: true + tty: true + container_name: ${app_service_name} + command: bash -c "gunicorn --workers=2 --name ${app_service_name} -c /edx/app/$(if [[ ${app_name} == 'edxapp' ]]; then echo ${app_name}/; fi)${app_repo}/${app_service_name}/$(if [[ ${app_name} == 'edxapp' ]]; then echo docker_${app_service_name}_gunicorn.py; else echo docker_gunicorn_configuration.py; fi) --log-file - --max-requests=1000 ${app_service_name}.wsgi:application" + user: "www-data:www-data" + network_mode: 'host' + restart: on-failure + environment: + - EDX_REST_API_CLIENT_NAME=sandbox-edx-${app_service_name} +$( + if [[ ${app_service_name} == 'lms' || ${app_service_name} == 'cms' ]]; then + echo -e " - DJANGO_SETTINGS_MODULE=${app_service_name}.envs.docker-production" + echo -e " - EDX_PLATFORM_SETTINGS=docker-production" + echo -e " - SERVICE_VARIANT=${app_service_name}" + echo -e " - ${app_cfg}=/edx/etc/${app_service_name}.yml" + else + echo -e " - DJANGO_SETTINGS_MODULE=${app_service_name}.settings.production" + echo -e " - ${app_cfg}=/${app_service_name}.yml" + fi +) + volumes: + - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock +$( + if [[ ${app_service_name} == 'lms' || ${app_service_name} == 'cms' ]]; then + echo -e " - /edx/var/${app_name}:/edx/var/${app_name}" + echo -e " - /edx/var/edx-themes:/edx/var/edx-themes" + echo -e " - /var/tmp/tracking_logs.log:/var/tmp/tracking_logs.log" + echo -e " - /edx/etc/${app_service_name}.yml:/edx/etc/${app_service_name}.yml" + else + echo -e " - /edx/var/${app_service_name}/staticfiles/:/var/tmp/" + echo -e " - /edx/etc/${app_service_name}.yml:/${app_service_name}.yml" + fi +) +$( + if [[ ${app_service_name} == 'cms' ]]; then + echo -e " - /edx/app/demo:/edx/app/demo" + fi +) +EOT + +docker-compose -f /home/$github_username/docker-compose-${app_service_name}.yml up -d + +EOF + + + + + + + +# # Create app database +# echo "mysql -uroot -e \"CREATE DATABASE \\\`${app_service_name}\\\`;\"" +# +# # use heredoc to dynamically create docker compose file +# echo "docker_compose_file=/var/tmp/docker-compose-${app_service_name}.yml" +# echo "cat << 'EOF' > \$docker_compose_file +# version: '2.1' +# services: +# app: +# image: ${app_service_name}:latest +# stdin_open: true +# tty: true +# build: +# context: /edx/app/${app_repo} +# dockerfile: Dockerfile +# container_name: ${app_service_name}.app +# command: bash -c 'while true; do exec gunicorn --workers=2 --name ${app_service_name} -c /edx/app/${app_repo}/${app_service_name}/docker_gunicorn_configuration.py --log-file - --max-requests=1000 ${app_service_name}.wsgi:application; sleep 2; done' +# network_mode: 'host' +# environment: +# DJANGO_SETTINGS_MODULE: ${app_service_name}.settings.production +# DJANGO_WATCHMAN_TIMEOUT: 30 +# ENABLE_DJANGO_TOOLBAR: 1 +# ${app_cfg}: /${app_service_name}.yml +# volumes: +# - /edx/app/${app_repo}:/edx/app/${app_repo}/ +# - /edx/etc/${app_service_name}.yml:/${app_service_name}.yml +# - /edx/var/${app_service_name}/staticfiles/:/var/tmp/ +#EOF" +# +# # run docker compose to spin up service container +# echo "docker-compose -f \$docker_compose_file up -d" +# +# # Wait for app container +# echo "sleep 5" +# +# # Run migrations +# echo "docker exec -t ${app_service_name}.app bash -c \"python3 manage.py migrate\"" +# +# # Run collectstatic +# echo "docker exec -t ${app_service_name}.app bash -c \"python3 manage.py collectstatic --noinput\"" +# # Create superuser +# echo "docker exec -t ${app_service_name}.app bash -c \"echo 'from django.contrib.auth import 
get_user_model; User = get_user_model(); User.objects.create_superuser(\\\"edx\\\", \\\"edx@example.com\\\", \\\"edx\\\") if not User.objects.filter(username=\\\"edx\\\").exists() else None' | python /edx/app/${app_repo}/manage.py shell\"" +# +# # Create Nginx config +# echo "site_config=/edx/app/nginx/sites-available/${app_service_name}" +# echo "cat << 'EOF' > \$site_config +# server { +# server_name ~^((stage|prod)-)?${app_hostname}.*; +# listen 80; +# rewrite ^ https://\$host\$request_uri? permanent; +# } +# server { +# server_name ~^((stage|prod)-)?${app_hostname}.*; +# listen 443 ssl; +# ssl_certificate /etc/ssl/certs/wildcard.sandbox.edx.org.pem; +# ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key; +# +# location / { +# try_files \$uri @proxy_to_app; +# } +# location ~ ^/(api)/ { +# try_files \$uri @proxy_to_app; +# } +# location @proxy_to_app { +# proxy_set_header X-Forwarded-Proto \$scheme; +# proxy_set_header X-Forwarded-Port \$server_port; +# proxy_set_header X-Forwarded-For \$remote_addr; +# proxy_set_header Host \$http_host; +# proxy_redirect off; +# proxy_pass http://127.0.0.1:${app_gunicorn_port}; +# } +# location ~ ^/static/(?P.*) { +# root /edx/var/${app_service_name}; +# try_files /staticfiles/\$file =404; +# } +# } +#EOF" +# echo "ln -s /edx/app/nginx/sites-available/${app_service_name} /etc/nginx/sites-enabled/${app_service_name}" +# echo "service nginx reload" +} diff --git a/util/jenkins/ascii-convert.sh b/util/jenkins/ascii-convert.sh index 2a4adbd3a95..4316471d964 100644 --- a/util/jenkins/ascii-convert.sh +++ b/util/jenkins/ascii-convert.sh @@ -9,5 +9,3 @@ BUILD_USER_LAST_NAME=$(ascii_convert $BUILD_USER_LAST_NAME) BUILD_USER_FIRST_NAME=$(ascii_convert $BUILD_USER_FIRST_NAME) BUILD_USER_ID=$(ascii_convert $BUILD_USER_ID) BUILD_USER=$(ascii_convert $BUILD_USER) - - diff --git a/util/jenkins/assume-role.sh b/util/jenkins/assume-role.sh new file mode 100644 index 00000000000..28a587a7267 --- /dev/null +++ b/util/jenkins/assume-role.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# assume-role function for use by machine services that don't use MFA to assume role. +# source this into your bash script and then +# +# assume-role(${AWS_ROLE_ARN}) +# +# The function turns off echoing, so no tokens are exposed. +# If you wish to hide your Role's ARN, you can set +x before calling the function. 
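+#
+# A minimal usage sketch (hypothetical role ARN; the optional second
+# argument is the session duration in seconds, defaulting to 3600):
+#
+#   source util/jenkins/assume-role.sh
+#   assume-role "arn:aws:iam::123456789012:role/some-jenkins-role"
+#   aws sts get-caller-identity   # now runs with the temporary credentials
+#   unassume-role                 # clear them again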
+
+assume-role() {
+    set +x
+    ROLE_ARN="${1}"
+    SESSIONID=$(date +"%s")
+    DURATIONSECONDS="${2:-3600}"
+
+    RESULT=(`aws sts assume-role --role-arn $ROLE_ARN \
+        --role-session-name $SESSIONID \
+        --duration-seconds $DURATIONSECONDS \
+        --query '[Credentials.AccessKeyId,Credentials.SecretAccessKey,Credentials.SessionToken]' \
+        --output text`)
+
+    export AWS_ACCESS_KEY_ID=${RESULT[0]}
+    export AWS_SECRET_ACCESS_KEY=${RESULT[1]}
+    export AWS_SECURITY_TOKEN=${RESULT[2]}
+    export AWS_SESSION_TOKEN=${AWS_SECURITY_TOKEN}
+    set -x
+}
+
+unassume-role () {
+    unset AWS_ACCESS_KEY_ID
+    unset AWS_SECRET_ACCESS_KEY
+    unset AWS_SECURITY_TOKEN
+    unset AWS_SESSION_TOKEN
+}
diff --git a/util/jenkins/check-ses-limits.py b/util/jenkins/check-ses-limits.py
new file mode 100755
index 00000000000..5bd0472f6d8
--- /dev/null
+++ b/util/jenkins/check-ses-limits.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+
+# This script is used by the monitoring/check-seslimits Jenkins job
+
+import boto3
+import argparse
+import sys
+
+
+# Copied from https://stackoverflow.com/a/41153081
+class ExtendAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        items = getattr(namespace, self.dest) or []
+        items.extend(values)
+        setattr(namespace, self.dest, items)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-c', '--critical', required=True, type=float,
+                        help="Critical threshold in percentage")
+    parser.add_argument('-w', '--warning', required=False, type=float,
+                        help="Warning threshold in percentage (Optional)")
+    parser.add_argument('-r', '--region', dest='regions', nargs='+',
+                        action=ExtendAction, required=True,
+                        help="AWS regions to check")
+    args = parser.parse_args()
+
+    if args.warning and args.warning >= args.critical:
+        warn_str = f"Warning threshold ({args.warning})"
+        crit_str = f"Critical threshold ({args.critical})"
+        print(f"ERROR: {warn_str} >= {crit_str}")
+        sys.exit(1)
+
+    exit_code = 0
+
+    session = boto3.session.Session()
+    for region in args.regions:
+        ses = session.client('ses', region_name=region)
+        data = ses.get_send_quota()
+        limit = data["Max24HourSend"]
+        current = data["SentLast24Hours"]
+        # Convert the usage ratio to a percentage so it is comparable to
+        # the percentage thresholds passed on the command line
+        percent = current / limit * 100
+        level = None
+
+        if percent >= args.critical:
+            level = "CRITICAL"
+        elif args.warning and percent >= args.warning:
+            level = "WARNING"
+
+        if level:
+            print("{} {}/{} ({}%) - {}".format(region, current, limit, percent,
+                                               level))
+            exit_code += 1
+
+    sys.exit(exit_code)
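+
+# Example invocation (hypothetical threshold and region values):
+#   ./check-ses-limits.py --critical 90 --warning 80 --region us-east-1 us-west-2
+# The exit status counts the regions at or above a threshold, so any breach
+# fails the calling Jenkins job.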
diff --git a/util/jenkins/check_table_size/check_table_size.py b/util/jenkins/check_table_size/check_table_size.py
new file mode 100644
index 00000000000..bf047da354f
--- /dev/null
+++ b/util/jenkins/check_table_size/check_table_size.py
@@ -0,0 +1,152 @@
+import boto3
+from botocore.exceptions import ClientError
+import sys
+import backoff
+import pymysql
+import click
+
+MAX_TRIES = 5
+
+
+class EC2BotoWrapper:
+    def __init__(self):
+        self.client = boto3.client("ec2")
+
+    @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES)
+    def describe_regions(self):
+        return self.client.describe_regions()
+
+
+class RDSBotoWrapper:
+    def __init__(self, **kwargs):
+        self.client = boto3.client("rds", **kwargs)
+
+    @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES)
+    def describe_db_instances(self):
+        return self.client.describe_db_instances()
+
+
+def rds_extractor():
+    """
+    Return list of all RDS instances across all the regions
+    Returns:
+        [
+            {
+                'name': name,
+                'Endpoint': Endpoint of RDS
+                'Port': Port of RDS
+            }
+        ]
+    """
+    client_region = EC2BotoWrapper()
+    rds_list = []
+    try:
+        regions_list = client_region.describe_regions()
+    except ClientError as e:
+        print(f"Unable to connect to AWS with error :{e}")
+        sys.exit(1)
+    for region in regions_list["Regions"]:
+        client = RDSBotoWrapper(region_name=region["RegionName"])
+        response = client.describe_db_instances()
+        for instance in response.get('DBInstances'):
+            # This condition is used to skip irrelevant RDS instances
+            if ("prod" in instance.get("Endpoint").get("Address") or "stage" in instance.get("Endpoint").get("Address")) and "test" not in instance["DBInstanceIdentifier"]:
+                temp_dict = {}
+                temp_dict["name"] = instance["DBInstanceIdentifier"]
+                temp_dict["Endpoint"] = instance.get("Endpoint").get("Address")
+                temp_dict["Port"] = instance.get("Port")
+                rds_list.append(temp_dict)
+    return rds_list
+
+
+def check_table_growth(rds_list, username, password, threshold, rds_threshold):
+    """
+    Return:
+        Return a list of all tables that cross the threshold limit
+        [
+            {
+                "name": "string",
+                "db": "string",
+                "table": "string",
+                "size": "string",
+            }
+        ]
+    """
+    try:
+        table_list = []
+        for db in rds_list:
+            print("Checking table sizes for {}".format(db["Endpoint"]))
+            rds_host_endpoint = db["Endpoint"]
+            rds_port = db["Port"]
+            connection = pymysql.connect(host=rds_host_endpoint,
+                                         port=rds_port, user=username, password=password)
+            # prepare a cursor object using cursor() method
+            cursor = connection.cursor()
+            # execute SQL query using execute() method.
+            cursor.execute("""
+                SELECT
+                    table_schema as `Database`,
+                    table_name AS `Table`,
+                    round(((data_length + index_length) / 1024 / 1024), 2) `Size in MB`
+                FROM information_schema.TABLES
+                WHERE TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema')
+                ORDER BY (data_length + index_length) DESC;
+            """)
+
+            rds_result = cursor.fetchall()
+            cursor.close()
+            connection.close()
+            if db["name"] in rds_threshold:
+                threshold_limit = rds_threshold[db["name"]]
+            else:
+                threshold_limit = threshold
+            for tables in rds_result:
+                temp_dict = {}
+                if tables[2] is not None and tables[2] > float(threshold_limit):
+                    temp_dict["rds"] = db["name"]
+                    temp_dict["db"] = tables[0]
+                    temp_dict["table"] = tables[1]
+                    temp_dict["size"] = tables[2]
+                    table_list.append(temp_dict)
+        return table_list
+    except Exception as ex:
+        print(ex)
+        sys.exit(1)
+
+
+@click.command()
+@click.option('--username', envvar='USERNAME', required=True)
+@click.option('--password', envvar='PASSWORD', required=True)
+@click.option('--threshold', required=True, help='Threshold for tables')
+@click.option('--rdsthreshold', type=(str, int), multiple=True, help='Specific RDS threshold')
+@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times')
+def controller(username, password, threshold, rdsthreshold, rdsignore):
+    """
+    Control execution of all other functions
+    Arguments:
+        username (str):
+            Get this from cli args
+
+        password (str):
+            Get this from cli args
+        threshold (str):
+            Get this from cli args
+        rdsthreshold (str, int):
+            Get this from cli args
+    """
+    rds_threshold = dict(rdsthreshold)
+    rds_list = rds_extractor()
+    filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore])
+    table_list = check_table_growth(filtered_rds_list, username, password, threshold, rds_threshold)
+    if len(table_list) > 0:
+        format_string = "{:<40}{:<20}{:<50}{}"
+        print(format_string.format("RDS Name", "Database Name", "Table Name", "Size"))
+        for items in table_list:
+            print(format_string.format(items["rds"], items["db"], items["table"], str(items["size"]) + " MB"))
+        exit(1)
+    exit(0)
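+
+# Example invocation (hypothetical credentials and thresholds; the size
+# values are in MB, matching the "Size in MB" column computed above):
+#   USERNAME=admin PASSWORD=secret python check_table_size.py \
+#       --threshold 5000 --rdsthreshold prod-edxapp 20000 --rdsignore test-rds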
+
+
+if __name__ == '__main__':
+    controller()
+
diff --git a/util/jenkins/check_table_size/requirements.txt b/util/jenkins/check_table_size/requirements.txt
new file mode 120000
index 00000000000..dc833dd4bef
--- /dev/null
+++ b/util/jenkins/check_table_size/requirements.txt
@@ -0,0 +1 @@
+../requirements.txt
\ No newline at end of file
diff --git a/util/jenkins/cloudflare-hit-rate.py b/util/jenkins/cloudflare-hit-rate.py
new file mode 100644
index 00000000000..203649cdcff
--- /dev/null
+++ b/util/jenkins/cloudflare-hit-rate.py
@@ -0,0 +1,50 @@
+"""
+CloudFlare API
+https://api.cloudflare.com/#zone-analytics-dashboard
+
+"""
+import requests
+import argparse
+import sys
+
+CLOUDFLARE_API_ENDPOINT = "https://api.cloudflare.com/client/v4/"
+
+
+def calculate_cache_hit_rate(zone_id, auth_key, email, threshold):
+    HEADERS = {"Accept": "application/json",
+               "X-Auth-Key": auth_key,
+               "X-Auth-Email": email}
+    # for the past one hour, -59 indicates minutes, we can go
+    # beyond that as well, for example for last 15
+    # hours it will be -899
+    PARAMS = {"since": "-59", "continuous": "true"}
+    res = requests.get(CLOUDFLARE_API_ENDPOINT + "zones/" + zone_id +
+                       "/analytics/dashboard", headers=HEADERS,
+                       params=PARAMS)
+    try:
+        data = res.json()
+        all_req = float(data["result"]["timeseries"][0]["requests"]["all"])
+        cached_req = float(data["result"]["timeseries"][0]["requests"]["cached"])
+        current_cache_hit_rate = cached_req / all_req * 100
+        if current_cache_hit_rate < threshold:
+            sys.exit(1)
+
+    except Exception as error:
+        print(f"JSON Error: {error} \n Content returned from API call: {res.text}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-z', '--zone', required=True,
+                        help="Cloudflare's Zone ID")
+    parser.add_argument('-k', '--auth_key', required=True,
+                        help="Authentication Key")
+    parser.add_argument('-e', '--email', required=True,
+                        help="email to use for authentication for CloudFlare API")
+    parser.add_argument('-t', '--threshold', required=True, type=float,
+                        help="Threshold limit to be passed to check against it")
+    args = parser.parse_args()
+
+    calculate_cache_hit_rate(args.zone, args.auth_key, args.email, args.threshold)
diff --git a/util/jenkins/demo-course-provisioner.sh b/util/jenkins/demo-course-provisioner.sh
new file mode 100644
index 00000000000..651444c6850
--- /dev/null
+++ b/util/jenkins/demo-course-provisioner.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function write_demo_course_script() {
+    cat <<EOF > "$1"
+#!/usr/bin/env bash
+
+set -ex
+
+demo_hashed_password='pbkdf2_sha256\$20000\$TjE34FJjc3vv\$0B7GUmH8RwrOc/BvMoxjb5j8EgnWTt3sxorDANeF7Qw='
+admin_password='${admin_hashed_password}'
+
+# Clone demo course
+mkdir /edx/var/edxapp/data
+chmod 777 /edx/var/edxapp/data
+git clone https://github.com/openedx/openedx-demo-course.git /edx/app/demo/edx-demo-course
+
+# import demo course
+docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e CMS_CFG=/edx/etc/cms.yml -e DJANGO_SETTINGS_MODULE=cms.envs.docker-production -e SERVICE_VARIANT=cms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/cms.yml:/edx/etc/cms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/app/demo:/edx/app/demo -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py cms import /edx/var/edxapp/data
/edx/app/demo/edx-demo-course + +# Create admin and demo users +docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py lms manage_user edx edx@example.com --initial-password-hash \$admin_password --superuser --staff +for user in honor audit verified staff ; do + email="\$user@example.com" + # Set staff flag for staff user + if [[ \$user == "staff" ]] ; then + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py lms manage_user \$user \$email --initial-password-hash \$demo_hashed_password --staff + else + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py lms manage_user \$user \$email --initial-password-hash \$demo_hashed_password + fi + # Enroll users in the demo course + docker run --network=host --rm -u='www-data' -e NO_PREREQ_INSTALL="1" -e SKIP_WS_MIGRATIONS="1" -e LMS_CFG=/edx/etc/lms.yml -e DJANGO_SETTINGS_MODULE=lms.envs.docker-production -e SERVICE_VARIANT=lms -e EDX_PLATFORM_SETTINGS=docker-production -v /edx/etc/lms.yml:/edx/etc/lms.yml -v /edx/var/edx-themes:/edx/var/edx-themes -v /edx/var/edxapp:/edx/var/edxapp -v /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock ${app_repo}:latest python3 manage.py lms enroll_user_in_course -e \$email -c course-v1:edX+DemoX+Demo_Course +done +EOF +} diff --git a/util/jenkins/django-admin.sh b/util/jenkins/django-admin.sh new file mode 100644 index 00000000000..d047a3b453f --- /dev/null +++ b/util/jenkins/django-admin.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cd configuration +pip install -r requirements.txt +env + +ansible="ansible first_in_tag_Name_${environment}-${deployment}-worker -i playbooks/ec2.py -u ubuntu -s -U www-data -m shell -a" +manage="cd /edx/app/edxapp/edx-platform && /edx/app/edxapp/venvs/edxapp/bin/python ./manage.py chdir=/edx/app/edxapp/edx-platform" + +if [ "$service_variant" != "UNSET" ]; then + manage="$manage $service_variant --settings aws" +fi + +if [ "$help" = "true" ]; then + manage="$manage help" +fi + +echo "Running $ansible \"$manage $command $options\"" +$ansible "$manage $command $options" diff --git a/util/jenkins/export_dead_locks/export_dead_locks.py b/util/jenkins/export_dead_locks/export_dead_locks.py new file mode 100644 index 00000000000..4bdc74ce18f --- /dev/null +++ b/util/jenkins/export_dead_locks/export_dead_locks.py @@ -0,0 +1,128 @@ +import boto3 +from botocore.exceptions import ClientError +import sys +import 
backoff +import pymysql +import time +import uuid +import click +import re +import splunklib.client as splunk_client + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(environment, whitelistregions): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'ARN': RDS ARN, + 'Region': Region of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + if whitelistregions: + regions_list = {'Regions': [region for region in regions_list['Regions'] if region['RegionName'] in whitelistregions]} + for region in regions_list["Regions"]: + try: + rds_client = RDSBotoWrapper(region_name=region["RegionName"]) + response = rds_client.describe_db_instances() + for instance in response.get('DBInstances'): + if environment in instance.get("Endpoint").get("Address") and "test" not in instance["DBInstanceIdentifier"]: + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["ARN"] = instance["DBInstanceArn"] + temp_dict["Region"] = region["RegionName"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Username"] = instance.get("MasterUsername") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + except ClientError as e: + print(f"Unable to get RDS from this region error :{e}") + sys.exit(1) + return rds_list + + +def rds_controller(rds_list, username, password, hostname, splunkusername, splunkpassword, port, indexname): + for item in rds_list: + rds_host_endpoint = item["Endpoint"] + rds_port = item["Port"] + connection = pymysql.connect(host=rds_host_endpoint, port=rds_port, + user=username, password=password) + cursor = connection.cursor() + cursor.execute(""" + SHOW ENGINE INNODB STATUS; + """) + rds_result = cursor.fetchall() + cursor.close() + connection.close() + regex = r"-{4,}\sLATEST DETECTED DEADLOCK\s-{4,}\s((.*)\s)*?-{4,}" + global_str = "" + for row in rds_result: + matches = re.finditer(regex, row[2]) + for matchNum, match in enumerate(matches, start=1): + global_str = match.group() + expr = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}") + global_str = re.sub(expr, '', global_str) + #to avoid empty dead locks + if len(global_str) > 0: + service = splunk_client.connect(host=hostname, port=port, username=splunkusername, password=splunkpassword) + myindex = service.indexes[indexname] + # Open a socket + mysocket = myindex.attach(host=rds_host_endpoint, source="INNODB STATUS", sourcetype="RDS") + + # Send events to it + mysocket.send(str.encode(global_str)) + + # Close the socket + mysocket.close() + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', required=True, help='Use to identify the environment') +@click.option('--hostname', required=True, help='Use to identify the splunk hostname') 
+@click.option('--splunkusername', envvar='SPLUNKUSERNAME', required=True) +@click.option('--splunkpassword', envvar='SPLUNKPASSWORD', required=True) +@click.option('--port', required=True, help='Use to identify the splunk port') +@click.option('--indexname', required=True, help='Use to identify the splunk index name') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +@click.option('--whitelistregions', '-r', multiple=True, help='Regions to check, can be specified multiple times') +def main(username, password, environment, hostname, splunkusername, splunkpassword, port, indexname, rdsignore, whitelistregions): + rds_list = rds_extractor(environment, whitelistregions) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + rds_controller(filtered_rds_list, username, password, hostname, splunkusername, splunkpassword, port, indexname) + + +if __name__ == '__main__': + main() + diff --git a/util/jenkins/export_dead_locks/requirements.txt b/util/jenkins/export_dead_locks/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/export_dead_locks/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/export_slow_logs/export_slow_query_logs.py b/util/jenkins/export_slow_logs/export_slow_query_logs.py new file mode 100644 index 00000000000..484935d24e8 --- /dev/null +++ b/util/jenkins/export_slow_logs/export_slow_query_logs.py @@ -0,0 +1,158 @@ +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import time +import uuid +import click + +MAX_TRIES = 5 + + +class CWBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("logs", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def put_log_events(self, **kwargs): + return self.client.put_log_events(**kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def create_log_stream(self, **kwargs): + return self.client.create_log_stream(**kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def create_log_group(self, **kwargs): + return self.client.create_log_group(**kwargs) + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(environment, whitelistregions): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'ARN': RDS ARN, + 'Region': Region of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + if whitelistregions: + regions_list = {'Regions': [region for region in regions_list['Regions'] if region['RegionName'] in whitelistregions]} + for region in regions_list["Regions"]: + try: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + if 
environment in instance.get("Endpoint").get("Address") and "test" not in instance["DBInstanceIdentifier"]: + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["ARN"] = instance["DBInstanceArn"] + temp_dict["Region"] = region["RegionName"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Username"] = instance.get("MasterUsername") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + except ClientError as e: + print(f"Unable to get RDS from this region error :{e}") + sys.exit(1) + return rds_list + + +def rds_controller(rds_list, username, password): + for item in rds_list: + rds_host_endpoint = item["Endpoint"] + rds_port = item["Port"] + connection = pymysql.connect(host=rds_host_endpoint, port=rds_port, + user=username, password=password) + cursor = connection.cursor() + try: + print(f"Checking slow log on RDS HOST {rds_host_endpoint}") + cursor.execute(""" + SELECT * + FROM mysql.slow_log + WHERE start_time > DATE_ADD(NOW(), INTERVAL -1 HOUR); + """) + rds_result = cursor.fetchall() + cursor.close() + connection.close() + if len(rds_result) > 0: + cw_logs = [] + sequencetoken = None + client = CWBotoWrapper() + loggroupname= "/slowlogs/" + rds_host_endpoint + try: + client.create_log_group(logGroupName=loggroupname) + print(('Created CloudWatch log group named "%s"', loggroupname)) + except ClientError: + print(('CloudWatch log group named "%s" already exists', loggroupname)) + LOG_STREAM = time.strftime('%Y-%m-%d') + "/[$LATEST]" + uuid.uuid4().hex + client.create_log_stream(logGroupName=loggroupname, logStreamName=LOG_STREAM) + for tables in rds_result: + temp = {} + temp["timestamp"] = int(tables[0].strftime("%s")) * 1000 + temp["message"] = "User@Host: " + str(tables[1]) + \ + "Query_time: " + str(tables[2]) + " Lock_time: " + str(tables[3]) + \ + " Rows_sent: " + str(tables[4]) + " Rows_examined: " + str(tables[5]) +\ + "Slow Query: " + str(tables[10]) + cw_logs.append(temp) + if sequencetoken == None: + response = client.put_log_events( + logGroupName=loggroupname, + logStreamName=LOG_STREAM, + logEvents=cw_logs + ) + else: + response = client.put_log_events( + logGroupName=loggroupname, + logStreamName=LOG_STREAM, + logEvents=cw_logs, + sequenceToken=sequencetoken + ) + sequencetoken = response["nextSequenceToken"] + except Exception as e: + print(e) + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', required=True, help='Use to identify the environment') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +@click.option('--whitelistregions', '-r', multiple=True, help='Regions to check, can be specified multiple times') +def main(username, password, environment, rdsignore, whitelistregions): + rds_list = rds_extractor(environment, whitelistregions) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + rds_controller(filtered_rds_list, username, password) + + +if __name__ == '__main__': + main() + diff --git a/util/jenkins/export_slow_logs/requirements.txt b/util/jenkins/export_slow_logs/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/export_slow_logs/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/extend-sandbox-termination.py b/util/jenkins/extend-sandbox-termination.py 
new file mode 100644
index 00000000000..d92126ce1e5
--- /dev/null
+++ b/util/jenkins/extend-sandbox-termination.py
@@ -0,0 +1,88 @@
+'''
+This script will be used to modify/extend the termination date on the sandbox.
+'''
+import boto
+from datetime import datetime
+from datetime import timedelta
+import logging
+import argparse
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser(
+        description="Modify/extend the termination date on the sandbox.")
+
+    parser.add_argument('-n', '--noop', action='http://github.com/store_true',
+                        help="don't actually run the commands", default=False)
+
+    parser.add_argument('-p', '--profile', default=None,
+                        help="AWS profile to use when connecting.")
+
+    extend_group = parser.add_mutually_exclusive_group(required=True)
+
+    extend_group.add_argument('-d', '--day', default=None,
+                              help="number of days", type=int)
+
+    extend_group.add_argument('-a', '--always', action='http://github.com/store_true', default=False,
+                              help="Do not terminate this Sandbox")
+
+    group = parser.add_mutually_exclusive_group(required=True)
+
+    group.add_argument('-u', '--username', default=None,
+                       help="GitHub username")
+
+    group.add_argument('-c', '--custom', default=None,
+                       help="Custom name, if the sandbox was not created with the default options")
+
+    group.add_argument('-i', '--instance-id', default=None,
+                       help="Sandbox Instance ID")
+
+    args = parser.parse_args()
+
+    ec2 = boto.connect_ec2(profile_name=args.profile)
+
+    days_to_increase = args.day
+
+    if args.username:
+        sandbox_name = args.username + '-sandbox'
+        reservations = ec2.get_all_instances(filters={"tag:Name": sandbox_name})
+    if args.custom:
+        sandbox_name = args.custom
+        reservations = ec2.get_all_instances(filters={"tag:Name": sandbox_name})
+    if args.instance_id:
+        instance_id = args.instance_id
+        reservations = ec2.get_all_instances(instance_ids=[instance_id])
+
+    instance = reservations[0].instances[0]
+
+    if args.noop:
+        logger.info("Sandbox ID:{} with Name: {} and Owner: {} will extend by {} days".format(
+            instance.id,
+            instance.tags['Name'],
+            instance.tags['owner'],
+            days_to_increase
+            )
+        )
+    elif args.always:
+        instance.add_tag('do_not_terminate', 'true')
+        logger.info("Sandbox ID:{} with Name: {} and Owner: {} will not be terminated".format(
+            instance.id,
+            instance.tags['Name'],
+            instance.tags['owner'],
+            )
+        )
+    else:
+        # modify the termination time
+        terminate_time = datetime.strptime(str(instance.tags['instance_termination_time']), "%m-%d-%Y %H:%M:%S")
+        terminate_time = terminate_time + timedelta(days=days_to_increase)
+        instance.add_tag('instance_termination_time', terminate_time.strftime("%m-%d-%Y %H:%M:%S"))
+        logger.info("Sandbox ID:{} with Name: {} and Owner: {} has been extended by {} days".format(
+            instance.id,
+            instance.tags['Name'],
+            instance.tags['owner'],
+            days_to_increase
+            )
+        )
diff --git a/util/jenkins/get-rc-branches.sh b/util/jenkins/get-rc-branches.sh
new file mode 100755
index 00000000000..d92c2795ece
--- /dev/null
+++ b/util/jenkins/get-rc-branches.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+usage() {
+
+    prog=$(basename "$0")
+    cat< /dev/null 2>&1
+else
+    $noop cd "/var/tmp/$repo_basename"
+    $noop git fetch > /dev/null > /dev/null 2>&1
+fi
+
+$noop cd "/var/tmp/$repo_basename"
+if [[ -z $noop ]]; then
+    for branch in $(git branch -a | sort -r | tr -d ' ' | grep -E "$filter" ); do
+        echo "origin/${branch}"
+    done
+    for tag in $(git tag -l | sort -r | tr -d ' ' | grep -E "$filter"); do
+        echo "$tag"
+    done
+else
+    
echo "Would have checked for branches or tags using filter $filter" +fi diff --git a/util/jenkins/helm_update_checker/helm_update_checker.py b/util/jenkins/helm_update_checker/helm_update_checker.py new file mode 100644 index 00000000000..8f46f1eb9c9 --- /dev/null +++ b/util/jenkins/helm_update_checker/helm_update_checker.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python +import subprocess +import yaml +import sys +import logging +import click +import os +import json +import boto3 +import backoff +from botocore.exceptions import ClientError + + +LOGGER = logging.getLogger(__name__) +logging.basicConfig() +global_list = [] +MAX_TRIES = 5 + + +class SESBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("ses", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def send_email(self, *args, **kwargs): + return self.client.send_email(*args, **kwargs) + + +def parse_yaml(file_name): + with open(file_name, 'r') as stream: + try: + charts_name = yaml.safe_load(stream) + if "dependencies" in charts_name: + app_list = charts_name["dependencies"] + for key in app_list: + add_helm(key["repository"], key["name"]) + update_helm() + for key in app_list: + repo_name = get_repo_name(key["repository"]) + check_version(charts_name["name"], key["name"], repo_name, key["version"]) + except yaml.YAMLError as exc: + LOGGER.error("error in configuration file: %s" % str(exc)) + sys.exit(1) + except KeyError as e: + print(f"I got a KeyError - reason {str(e)}") + + +def add_helm(repo_url, repo_name): + try: + cmd_add = 'helm repo add ' + repo_name + " " + repo_url + subprocess.check_output(cmd_add, shell=True) + except subprocess.CalledProcessError as e: + print(e.output) + + +def update_helm(): + cmd_update = 'helm repo update' + subprocess.check_output(cmd_update, shell=True) + + +def get_repo_name(repo_url): + try: + get_repo_cmd = 'helm repo list -o json' + repositories = subprocess.check_output(get_repo_cmd, shell=True).strip() + repo_list = json.loads(repositories.decode()) + for repo in repo_list: + if repo["url"] == repo_url: + return repo['name'] + except subprocess.CalledProcessError as e: + print(e.output) + + +def check_version(chart_name, app_name, repo_name, app_version): + cmd = 'helm show chart ' + repo_name + "/" + app_name + ' | grep version | tail -1' + output = subprocess.check_output(cmd, shell=True) + latest_version = output.decode().split(": ")[-1].rstrip() + if not compare_version(app_version, latest_version): + temp_dict = { + chart_name+"/"+app_name: { + "current_version": app_version, + "latest_version": latest_version + }, + } + global_list.append(temp_dict) + + +def compare_version(current_version, latest_version): + return True if current_version == latest_version else False + + +def find(name, path): + for root, dirs, files in os.walk(path): + if name in files: + parse_yaml(os.path.join(root, name)) + + +def send_an_email(to_addr, from_addr, app_list, region): + ses_client = SESBotoWrapper(region_name=region) + + message = """ +

+    <p>Hello,</p>
+
+    <p>Updates are available for the following helm charts:</p>
+
+    <table>
+      <tr>
+        <th>App Name</th>
+        <th>Current version</th>
+        <th>Latest Version</th>
+      </tr>
+    """
+    for apps in app_list:
+        for data in apps:
+            message += """
+      <tr>
+        <td>{AppName}</td>
+        <td>{CurrentVersion}</td>
+        <td>{LatestVersion}</td>
+      </tr>
+            """.format(
+                AppName=data,
+                CurrentVersion=apps[data]["current_version"],
+                LatestVersion=apps[data]["latest_version"]
+            )
+
+    message += """
+    </table>
""" + print(("Sending the following as email to {}".format(to_addr))) + print(message) + ses_client.send_email( + Source=from_addr, + Destination={ + 'ToAddresses': [ + to_addr + ] + }, + Message={ + 'Subject': { + 'Data': 'Updates available for helms charts', + 'Charset': 'utf-8' + }, + 'Body': { + 'Html':{ + 'Data': message, + 'Charset': 'utf-8' + } + } + } + ) + + +@click.command() +@click.option('--file-name', required=True, help='Filename which have helm chart details.') +@click.option('--file-path', required=True, help='File path where helm chart file exists.') +@click.option('--region', multiple=True, help='Default AWS region') +@click.option('--recipient', multiple=True, help='Recipient Email address') +@click.option('--sender', multiple=True, help='Sender email address') +def controller(file_name, file_path, region, recipient, sender): + find(file_name, file_path) + if len(global_list) > 0: + send_an_email(recipient[0], sender[0], global_list, region[0]) + + +if __name__ == "__main__": + controller() diff --git a/util/jenkins/helm_update_checker/requirements.txt b/util/jenkins/helm_update_checker/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/helm_update_checker/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/kustomize_update_checker/kustomize_update_checker.py b/util/jenkins/kustomize_update_checker/kustomize_update_checker.py new file mode 100644 index 00000000000..b8b720f219f --- /dev/null +++ b/util/jenkins/kustomize_update_checker/kustomize_update_checker.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +import subprocess +import yaml +import sys +import logging +import click +import boto3 +import backoff +from botocore.exceptions import ClientError + + +LOGGER = logging.getLogger(__name__) +logging.basicConfig() +global_list = [] +MAX_TRIES = 5 + + +class SESBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("ses", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def send_email(self, *args, **kwargs): + return self.client.send_email(*args, **kwargs) + + +def parse_yaml(file_name): + with open(file_name, 'r') as stream: + try: + apps_details = yaml.safe_load(stream) + for item in apps_details: + registery = apps_details[item]["registery"] + repo = apps_details[item]["repo"] + url = "https://" + registery + "/v1/repositories/" + repo + "/tags" + sed_filter = "sed -e 's/[][]//g' -e 's/\"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}' | tail -1" + cmd = "wget -q " + url + " -O - | " + sed_filter + latest_version = subprocess.check_output(cmd, shell=True).strip().decode("utf-8") + check_version(item, apps_details[item]["version"], latest_version) + + except yaml.YAMLError as exc: + LOGGER.error("error in configuration file: %s" % str(exc)) + sys.exit(1) + except KeyError as e: + print(f"I got a KeyError - reason {str(e)}") + + +def check_version(app_name, app_version, latest_version): + if not compare_version(app_version, latest_version): + temp_dict = { + app_name: { + "current_version": app_version, + "latest_version": latest_version + }, + } + global_list.append(temp_dict) + + +def compare_version(current_version, latest_version): + if current_version == latest_version: + return True + return False + + +def send_an_email(to_addr, from_addr, app_list, region): + ses_client = SESBotoWrapper(region_name=region) + + message = """ +

+    <p>Hello,</p>
+
+    <p>Updates are available for the following kustomize based apps:</p>
+
+    <table>
+      <tr>
+        <th>App Name</th>
+        <th>Current version</th>
+        <th>Latest Version</th>
+      </tr>
+    """
+    for apps in app_list:
+        for data in apps:
+            message += """
+      <tr>
+        <td>{AppName}</td>
+        <td>{CurrentVersion}</td>
+        <td>{LatestVersion}</td>
+      </tr>
+            """.format(
+                AppName=data,
+                CurrentVersion=apps[data]["current_version"],
+                LatestVersion=apps[data]["latest_version"]
+            )
+
+    message += """
+    </table>
""" + print(f"Sending the following as email to {to_addr}") + print(message) + ses_client.send_email( + Source=from_addr, + Destination={ + 'ToAddresses': [ + to_addr + ] + }, + Message={ + 'Subject': { + 'Data': 'Updates available for kustomize based apps', + 'Charset': 'utf-8' + }, + 'Body': { + 'Html':{ + 'Data': message, + 'Charset': 'utf-8' + } + } + } + ) + + +@click.command() +@click.option('--file-name', required=True, help='Filename which have kustomize based apps details.') +@click.option('--file-path', required=True, help='File path where kustomize based apps file exists.') +@click.option('--region', multiple=True, help='Default AWS region') +@click.option('--recipient', multiple=True, help='Recipient Email address') +@click.option('--sender', multiple=True, help='Sender email address') +def controller(file_name, file_path, region, recipient, sender): + parse_yaml(file_path + "/" + file_name) + if len(global_list) > 0: + send_an_email(recipient[0], sender[0], global_list, region[0]) + + +if __name__ == "__main__": + controller() diff --git a/util/jenkins/kustomize_update_checker/requirements.txt b/util/jenkins/kustomize_update_checker/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/kustomize_update_checker/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/list_mysql_process/list_mysql_process.py b/util/jenkins/list_mysql_process/list_mysql_process.py new file mode 100644 index 00000000000..2dd092c0889 --- /dev/null +++ b/util/jenkins/list_mysql_process/list_mysql_process.py @@ -0,0 +1,149 @@ +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import click + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(environment, whitelistregions): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'Endpoint': Endpoint of RDS + 'Port': Port of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + if whitelistregions: + regions_list = {'Regions': [region for region in regions_list['Regions'] if region['RegionName'] in whitelistregions]} + for region in regions_list["Regions"]: + try: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + if environment in instance.get("Endpoint").get("Address") and "test" not in instance["DBInstanceIdentifier"]: + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + except ClientError as e: + print(f"Unable to get RDS from this region error :{e}") + sys.exit(1) + return rds_list + + +def check_queries_running(rds_list, username, password): + """ + Return: + Return 
list of currently running queries + [ + { + "id": "string", + "user": "string", + "host": "string", + "command": "string", + "time": "integer", + "state": "string", + "info": "string" + } + ] + """ + try: + process_list = [] + for item in rds_list: + rds_host_endpoint = item["Endpoint"] + rds_port = item["Port"] + connection = pymysql.connect(host=rds_host_endpoint, + port=rds_port, user=username, password=password) + # prepare a cursor object using cursor() method + cursor = connection.cursor() + # execute SQL query using execute() method. + cursor.execute(""" + SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST + """) + + rds_result = cursor.fetchall() + cursor.close() + connection.close() + for process in rds_result: + temp_dict = {} + temp_dict["id"] = process[0] + temp_dict["user"] = process[1] + temp_dict["host"] = process[2] + temp_dict["command"] = process[4] + temp_dict["time"] = process[5] + temp_dict["state"] = process[6] + temp_dict["info"] = process[7] + process_list.append(temp_dict) + return process_list + except Exception as ex: + print(ex) + sys.exit(1) + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', required=True, help='Use to identify the environment') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +@click.option('--whitelistregions', '-r', multiple=True, help='Regions to check, can be specified multiple times') +def controller(username, password, environment, rdsignore, whitelistregions): + """ + Control execution of all other functions + Arguments: + username (str): + Get this from cli args + + password (str): + Get this from cli args + + environment (str): + Get this from cli args + """ + rds_list = rds_extractor(environment, whitelistregions) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + process_list = check_queries_running(filtered_rds_list, username, password) + if len(process_list) > 0: + format_string = "{:<20}{:<20}{:<30}{:<20}{:<20}{:<70}{}" + print(format_string.format("Query ID", "User Name", "Host", "Command", "Time Executed", "State", "Info")) + for items in process_list: + print(format_string.format(items["id"], items["user"], items["host"], items["command"], + str(items["time"]) + " sec", items["state"], items["info"])) + exit(0) + + +if __name__ == '__main__': + controller() + diff --git a/util/jenkins/list_mysql_process/requirements.txt b/util/jenkins/list_mysql_process/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/list_mysql_process/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/missing_alerts_checker/missing_alerts_checker.py b/util/jenkins/missing_alerts_checker/missing_alerts_checker.py new file mode 100644 index 00000000000..ec283bb26f2 --- /dev/null +++ b/util/jenkins/missing_alerts_checker/missing_alerts_checker.py @@ -0,0 +1,270 @@ +import boto3 +import requests +import click +from botocore.exceptions import ClientError +import sys +import re + + +class NewRelic: + def __init__(self, new_relic_api_key): + self.url_alert_extractor = "https://api.newrelic.com/v2/alerts_policies.json" + self.headers = {'X-Api-Key': new_relic_api_key} + + def new_relic_policies_extractor(self): + """ + Return: + Return list of all alert policies extracted from New relic + { + "policy": { + "id": int, + 
"incident_preference": str, + "name": str, + "created_at": int, + "updated_at": int + } + } + """ + response = requests.get(self.url_alert_extractor, headers=self.headers) + if response.status_code != 200: + print("Unable to communicate with New relic.") + sys.exit(1) + try: + alert_policies = response.json() + except ValueError: + print(f"Failed to parse response json. Got:\n{response.text}") + sys.exit(1) + return alert_policies + + +class InfraAlerts: + def edc_extractor(self): + """ + Return list of all EC2 instances with EDC's tags across all the regions + Returns: + [ + { + 'name': name, + 'ID': instance.id + } + ] + """ + client_region = boto3.client('ec2') + filter_tags = [ + { + "Name": "tag:environment", + "Values": ["*"] + }, + { + "Name": "tag:deployment", + "Values": ["*"] + }, + { + "Name": "tag:cluster", + "Values": ["*"] + }, + { + 'Name': 'instance-state-name', + 'Values': ['running'] + } + ] + instance_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + for region in regions_list['Regions']: + client = boto3.resource('ec2', region_name=region['RegionName']) + response = client.instances.filter(Filters=filter_tags) + for instance in response: + temp_dict = {} + for tag in instance.tags: + if tag['Key'] == "Name": + name = tag['Value'] + temp_dict = { + 'name': name, + 'ID': instance.id + } + break + else: + pass + instance_list.append(temp_dict) + return instance_list + + def missing_alerts_checker(self, instance_list, alert_policies): + """ + Arguments: + instance_list (list): + List of all instances for which we find alerts + alert_policies list(dict): + List of all existing alerts new relic + Return: + Return list of all instances which have no alert in new Relic + [ + { + 'name': name, + 'ID': instance.id + } + ] + """ + result_instance = [] + for instance in instance_list: + if not any(policy["name"] == instance["name"] + "-infrastructure" for policy in alert_policies["policies"]): + result_instance.append(instance) + return result_instance + + +class AppAlerts: + def __init__(self, new_relic_api_key): + self.url_app_extractor = "https://api.newrelic.com/v2/applications.json" + self.headers = {'X-Api-Key': new_relic_api_key} + + def new_relic_app_extractor(self): + """ + Return: + Return list all applications in new relic + """ + response = requests.get(self.url_app_extractor, headers=self.headers) + if response.status_code != 200: + print("Unable to communicate with New relic.") + sys.exit(1) + try: + apps_list = response.json() + except ValueError: + print(f"Failed to parse response json. 
Got:\n{response.text}") + sys.exit(1) + return apps_list["applications"] + + def missing_alerts_checker(self, app_list, alert_policies): + """ + Arguments: + app_list (list): + List of all applications for which we find alerts + alert_policies list(dict): + List of all existing alerts new relic + Return: + Return list of all applications which have no alert in new Relic + """ + result_apps = [] + for apps in app_list: + if not any(policy["name"] == apps["name"] + "-application" for policy in alert_policies["policies"]): + result_apps.append(apps) + return result_apps + + +class BrowserAlerts: + def __init__(self, new_relic_api_key): + self.url_browser_extractor = "https://api.newrelic.com/v2/browser_applications.json" + self.headers = {'X-Api-Key': new_relic_api_key} + + def new_relic_browser_extractor(self): + """ + Return: + Return list all browser applications in new relic + [ + { + "id": "integer", + "name": "string", + "browser_monitoring_key": "string", + "loader_script": "string" + } + ] + """ + response = requests.get(self.url_browser_extractor, headers=self.headers) + if response.status_code != 200: + print("Unable to communicate with New relic.") + sys.exit(1) + try: + browser_list = response.json() + except ValueError: + raise Exception(f"Failed to parse response json. Got:\n{response.text}") + return browser_list["browser_applications"] + + def missing_alerts_checker(self, browser_list, alert_policies): + """ + Arguments: + browser_list (list): + List of all browser applications for which we find alerts + alert_policies list(dict): + List of all existing alerts new relic + Return: + Return list of all browser applications which have no alert in new Relic + [ + { + "id": "integer", + "name": "string", + "browser_monitoring_key": "string", + "loader_script": "string" + } + ] + """ + result_browser = [] + for browser in browser_list: + if not any(policy["name"] == browser["name"].rstrip() + "-browser" for policy in alert_policies["policies"]): + result_browser.append(browser) + return result_browser + + +@click.command() +@click.option('--new-relic-api-key', required=True, help='API Key to use to speak with NewRelic.') +@click.option('--ignore', '-i', multiple=True, help='App name regex to filter out, can be specified multiple times') +def controller(new_relic_api_key,ignore): + """ + Control execution of all other functions + Arguments: + new_relic_api_key (str): + Get this from cli args + """ + flag = 0 + # Initializing object of classes + infracheck = InfraAlerts() + new_relic_obj = NewRelic(new_relic_api_key) + # Get list of all instances in different regions + instance_list = infracheck.edc_extractor() + # Get list of all alert policies in new relic + alert_policies = new_relic_obj.new_relic_policies_extractor() + # Get list of all instances without alerts + missing_alerts_list = infracheck.missing_alerts_checker(instance_list, alert_policies) + filtered_missing_alerts_list = list([x for x in missing_alerts_list if not any(re.search(r, x['name']) for r in ignore)]) + format_string = "{:<30}{}" + print(format_string.format("Instance ID", "Instance Name")) + for instance_wo_alerts in filtered_missing_alerts_list: + print(format_string.format(instance_wo_alerts["ID"], instance_wo_alerts["name"])) + flag = 1 + + # Initializing object of classes + appcheck = AppAlerts(new_relic_api_key) + new_relic_obj = NewRelic(new_relic_api_key) + # Get list of all applications from new relic + apps_list = appcheck.new_relic_app_extractor() + # Get list of all applications without 
alerts + missing_alerts_list_app = appcheck.missing_alerts_checker(apps_list, alert_policies) + filtered_missing_alerts_list_app = list([x for x in missing_alerts_list_app if not any(re.search(r, x['name']) for r in ignore)]) + format_string = "{:<20}{}" + print("") + print(format_string.format("Application ID", "Application Name")) + for instance_wo_alerts in filtered_missing_alerts_list_app: + print(format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"])) + flag = 1 + + # Initializing object of classes + browsercheck = BrowserAlerts(new_relic_api_key) + new_relic_obj = NewRelic(new_relic_api_key) + # Get list of all browser applications from new relic + browser_list = browsercheck.new_relic_browser_extractor() + # Get list of all browser applications without alerts + missing_alerts_list_browser = browsercheck.missing_alerts_checker(browser_list, alert_policies) + filtered_missing_alerts_list_browser = list([x for x in missing_alerts_list_browser if not any(re.search(r, x['name']) for r in ignore)]) + format_string = "{:<20}{}" + print("") + print(format_string.format("Browser ID", "Browser Name")) + for instance_wo_alerts in filtered_missing_alerts_list_browser: + print(format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"])) + flag = 1 + sys.exit(flag) + + +if __name__ == '__main__': + controller() + diff --git a/util/jenkins/monitor_repos.py b/util/jenkins/monitor_repos.py deleted file mode 100644 index 3dfce5a4e7f..00000000000 --- a/util/jenkins/monitor_repos.py +++ /dev/null @@ -1,83 +0,0 @@ -import argparse -import json -import logging as log -import pickle -import requests -import yaml -from datetime import datetime -from git import Repo -from os import path -from pprint import pformat -from pymongo import MongoClient, DESCENDING -from stage_release import uri_from - -def releases(repo): - """ - Yield a list of all release candidates from the origin. - """ - for ref in repo.refs: - if ref.name.startswith('origin/rc/'): - yield ref - -def candidates_since(repo, time): - """ - Given a repo yield a list of release candidate refs that have a - commit on them after the passed in time - """ - for rc in releases(repo): - last_update = datetime.utcfromtimestamp(rc.commit.committed_date) - if last_update > time: - # New or updated RC - yield rc - -def stage_release(url, token, repo, rc): - """ - Submit a job to stage a new release for the new rc of the repo. - """ - # Setup the Jenkins params. - params = [] - params.append({'name': "{}_REF".format(repo), 'value': True}) - params.append({'name': repo, 'value': rc.commit.hexsha}) - build_params = {'parameter': params} - log.info("New rc found{}, staging new release.".format(rc.name)) - r = requests.post(url, - data={"token", token}, - params={"json": json.dumps(build_params)}) - if r.status_code != 201: - msg = "Failed to submit request with params: {}" - raise Exception(msg.format(pformat(build_params))) - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Monitor git repos for new rc branches.") - parser.add_argument('-c', '--config', required=True, - help="Config file.") - parser.add_argument('-p', '--pickle', default="data.pickle", - help="Pickle of presistent data.") - - args = parser.parse_args() - - config = yaml.safe_load(open(args.config)) - - if path.exists(args.pickle): - data = pickle.load(open(args.pickle)) - else: - data = {} - - # Presist the last time we made this check. 
- if 'last_check' not in data: - last_check = datetime.utcnow() - else: - last_check = data['last_check'] - - data['last_check'] = datetime.utcnow() - - # Find plays that are affected by this repo. - repos_with_changes = {} - for repo in config['repos']: - # Check for new rc candidates. - for rc in candidates_since(Repo(repo), last_check): - # Notify stage-release to build for the new repo. - stage_release(config['abbey_url'], config['abbey_token'], repo, rc) - - pickle.dump(data, open(args.pickle, 'w')) diff --git a/util/jenkins/primary_keys/__init__.py b/util/jenkins/primary_keys/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/util/jenkins/primary_keys/check_primary_keys.py b/util/jenkins/primary_keys/check_primary_keys.py new file mode 100644 index 00000000000..61ea5b87f81 --- /dev/null +++ b/util/jenkins/primary_keys/check_primary_keys.py @@ -0,0 +1,361 @@ +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import click +from datetime import datetime, timedelta, timezone + +MAX_TRIES = 5 +PERIOD = 360 +UNIT = 'Percent' + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class CwBotoWrapper(): + def __init__(self): + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def list_metrics(self, *args, **kwargs): + return self.client.list_metrics(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def put_metric_data(self, *args, **kwargs): + return self.client.put_metric_data(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def get_metric_stats(self, *args, **kwargs): + return self.client.get_metric_statistics(*args, **kwargs) + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +class SESBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("ses", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def send_email(self, *args, **kwargs): + return self.client.send_email(*args, **kwargs) + + +def send_an_email(to_addr, from_addr, primary_keys_message, region): + ses_client = SESBotoWrapper(region_name=region) + + message = """ +

+    <p>Hello,</p>
+
+    <p>Primary keys of the following tables will be exhausted soon:</p>
+
+    <table>
+      <tr>
+        <th>Database</th>
+        <th>Table</th>
+        <th>Usage Percentage</th>
+        <th>Remaining Days</th>
+      </tr>
+    """
+    for item in range(len(primary_keys_message)):
+        message += """
+      <tr>
+        <td>{Database}</td>
+        <td>{Table}</td>
+        <td>{UsedPercentage}</td>
+        <td>{DaysRemaining}</td>
+      </tr>
+        """.format(
+            Database=primary_keys_message[item]['database_name'],
+            Table=primary_keys_message[item]['table_name'],
+            UsedPercentage=primary_keys_message[item]['percentage_of_PKs_consumed'],
+            DaysRemaining=primary_keys_message[item]['remaining_days'] if "remaining_days" in primary_keys_message[item] else ''
+        )
+
+    message += """
+    </table>
""" + print(f"Sending the following as email to {to_addr}") + print(message) + ses_client.send_email( + Source=from_addr, + Destination={ + 'ToAddresses': [ + to_addr + ] + }, + Message={ + 'Subject': { + 'Data': 'Primary keys of these table would be exhausted soon', + 'Charset': 'utf-8' + }, + 'Body': { + 'Html':{ + 'Data': message, + 'Charset': 'utf-8' + } + } + } + ) + + +def get_rds_from_all_regions(whitelistregions): + """ + Gets a list of RDS instances across all the regions and deployments in AWS + + :returns: + list of all RDS instances across all the regions + [ + { + 'name': name of RDS, + 'Endpoint': Endpoint of RDS + 'Port': Port of RDS + } + ] + name (string) + Endpoint (string) + Port (string) + """ + ec2_client = EC2BotoWrapper() + rds_list = [] + try: + regions_list = ec2_client.describe_regions() + except ClientError as e: + print(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + if whitelistregions: + regions_list = {'Regions': [region for region in regions_list['Regions'] if region['RegionName'] in whitelistregions]} + for region in regions_list["Regions"]: + try: + print("Getting RDS instances in region {}".format(region["RegionName"])) + rds_client = RDSBotoWrapper(region_name=region["RegionName"]) + response = rds_client.describe_db_instances() + for instance in response.get('DBInstances'): + if "test" not in instance["DBInstanceIdentifier"]: + temp_dict = dict() + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + except ClientError as e: + print(f"Unable to get RDS from this region error :{e}") + sys.exit(1) + return rds_list + + +def check_primary_keys(rds_list, username, password, environment, deploy): + """ + :param rds_list: + :param username: + :param password: + + :returns: + Return list of all tables that cross threshold limit + [ + { + "name": "string", + "db": "string", + "table": "string", + "size": "string", + } + ] + """ + cloudwatch = CwBotoWrapper() + metric_name = 'used_key_space' + namespace = f"rds-primary-keys/{environment}-{deploy}" + try: + table_list = [] + metric_data = [] + tables_reaching_exhaustion_limit = [] + for rds_instance in rds_list: + print("Checking rds instance {}".format(rds_instance["name"])) + rds_host_endpoint = rds_instance["Endpoint"] + rds_port = rds_instance["Port"] + connection = pymysql.connect(host=rds_host_endpoint, + port=rds_port, + user=username, + password=password) + # prepare a cursor object using cursor() method + cursor = connection.cursor() + # execute SQL query using execute() method. 
+            # this query returns the tables whose auto-increment key space is
+            # most heavily used, as a percentage; results are limited to the top 10
+            cursor.execute("""
+            SELECT
+                table_schema,
+                table_name,
+                column_name,
+                column_type,
+                auto_increment,
+                max_int,
+                ROUND(auto_increment/max_int*100,2) AS used_pct
+            FROM
+            (
+                SELECT
+                    table_schema,
+                    table_name,
+                    column_name,
+                    column_type,
+                    auto_increment,
+                    pow
+                    (2,
+                    case data_type
+                        when 'tinyint' then 7
+                        when 'smallint' then 15
+                        when 'mediumint' then 23
+                        when 'int' then 31
+                        when 'bigint' then 63
+                    end
+                    +(column_type like '% unsigned'))-1
+                    as max_int
+                FROM
+                    information_schema.tables t
+                    JOIN information_schema.columns c
+                    USING (table_schema,table_name)
+                WHERE t.table_schema not in ('mysql','information_schema','performance_schema')
+                    AND t.table_type = 'base table'
+                    AND c.extra LIKE '%auto_increment%'
+                    AND t.auto_increment IS NOT NULL
+            )
+            TMP ORDER BY used_pct desc
+            LIMIT 10;
+            """)
+            rds_result = cursor.fetchall()
+            cursor.close()
+            connection.close()
+            for result_table in rds_result:
+                table_data = {}
+                db_name = result_table[0]
+                table_name = result_table[1]
+                table_name_combined = f"{db_name}.{table_name}"
+                table_percent = result_table[6]
+                if table_percent > 70:
+                    print("RDS {} Table {}: Primary keys {}% full".format(
+                        rds_instance["name"], table_name_combined, table_percent))
+                    metric_data.append({
+                        'MetricName': metric_name,
+                        'Dimensions': [{
+                            "Name": rds_instance["name"],
+                            "Value": table_name_combined
+                        }],
+                        'Value': table_percent,  # percentage of the usage of primary keys
+                        'Unit': UNIT
+                    })
+                    table_data["database_name"] = rds_instance['name']
+                    table_data["table_name"] = table_name_combined
+                    table_data["percentage_of_PKs_consumed"] = table_percent
+                    remaining_days_table_name = table_name_combined
+                    # Hack to transition to metric names with db prepended
+                    if table_name == "courseware_studentmodule" and rds_instance["name"] in [
+                        "prod-edx-edxapp-us-east-1b-2",
+                        "prod-edx-edxapp-us-east-1c-2",
+                    ]:
+                        remaining_days_table_name = table_name
+                        metric_data.append({
+                            'MetricName': metric_name,
+                            'Dimensions': [{
+                                "Name": rds_instance["name"],
+                                "Value": table_name
+                            }],
+                            'Value': table_percent,  # percentage of the usage of primary keys
+                            'Unit': UNIT
+                        })
+
+                    remaining_days = get_metrics_and_calculate_diff(namespace, metric_name, rds_instance["name"], table_name, table_percent)
+                    if remaining_days:
+                        table_data["remaining_days"] = remaining_days
+                    tables_reaching_exhaustion_limit.append(table_data)
+        if len(metric_data) > 0:
+            cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data)
+        return tables_reaching_exhaustion_limit
+    except Exception as e:
+        print("Please see the following exception:", e)
+        sys.exit(1)
+
+
+def get_metrics_and_calculate_diff(namespace, metric_name, dimension, value, current_consumption):
+    cloudwatch = CwBotoWrapper()
+    res = cloudwatch.get_metric_stats(
+        Namespace=namespace,
+        MetricName=metric_name,
+        Dimensions=[
+            {
+                'Name': dimension,
+                'Value': value
+            },
+        ],
+        StartTime=datetime.utcnow() - timedelta(days=180),
+        EndTime=datetime.utcnow(),
+        Period=86400,
+        Statistics=[
+            'Maximum',
+        ],
+        Unit=UNIT
+    )
+    datapoints = res["Datapoints"]
+    days_remaining_before_exhaustion = ''
+    if len(datapoints) > 0:
+        max_value = max(datapoints, key=lambda x: x['Timestamp'])
+        time_diff = datetime.now(timezone.utc) - max_value["Timestamp"]
+        last_max_reading = max_value["Maximum"]
+        # note: this is the percentage of the key space still unused
+        consumed_keys_percentage = 100 - current_consumption
+        if current_consumption > last_max_reading:
+            current_usage = current_consumption -
last_max_reading + no_of_days = time_diff.days + increase_over_time_period = current_usage/no_of_days + days_remaining_before_exhaustion = consumed_keys_percentage/increase_over_time_period + print("Days remaining for {table} table on db {db}: {days}".format(table=value, + db=dimension, + days=days_remaining_before_exhaustion)) + return days_remaining_before_exhaustion + + + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. edx or edge)") +@click.option('--region', multiple=True, help='Default AWS region') +@click.option('--recipient', multiple=True, help='Recipient Email address') +@click.option('--sender', multiple=True, help='Sender email address') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +@click.option('--whitelistregions', '-r', multiple=True, help='Regions to check, can be specified multiple times') +def controller(username, password, environment, deploy, region, recipient, sender, rdsignore, whitelistregions): + """ + calls other function and calculate the results + :param username: username for the RDS. + :param password: password for the RDS. + :return: None + """ + # get list of all the RDSes across all the regions and deployments + rds_list = get_rds_from_all_regions(whitelistregions) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + table_list = check_primary_keys(filtered_rds_list, username, password, environment, deploy) + if len(table_list) > 0: + send_an_email(recipient[0], sender[0], table_list, region[0]) + sys.exit(0) + + +if __name__ == "__main__": + controller() diff --git a/util/jenkins/primary_keys/requirements.txt b/util/jenkins/primary_keys/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/primary_keys/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/rds_alarms_checker/missing_rds_alarms.py b/util/jenkins/rds_alarms_checker/missing_rds_alarms.py new file mode 100644 index 00000000000..151a50183d5 --- /dev/null +++ b/util/jenkins/rds_alarms_checker/missing_rds_alarms.py @@ -0,0 +1,110 @@ +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import click + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +class CWBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("cloudwatch", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_alarms(self, **kwargs): + return self.client.describe_alarms(**kwargs) + + +def rds_extractor(whitelistregions): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'ARN': RDS ARN, + 'Region': Region of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = 
client_region.describe_regions() + except ClientError as e: + print(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + if whitelistregions: + regions_list = {'Regions': [region for region in regions_list['Regions'] if region['RegionName'] in whitelistregions]} + for region in regions_list["Regions"]: + try: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + if "test" not in instance["DBInstanceIdentifier"]: + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["ARN"] = instance["DBInstanceArn"] + temp_dict["Region"] = region["RegionName"] + rds_list.append(temp_dict) + except ClientError as e: + print(f"Unable to get RDS from this region error :{e}") + sys.exit(1) + return rds_list + + +def cloudwatch_alarm_checker(alarmprefix, region): + """ + Return number of alarms associated with given RDS instance + Returns: + len(alarms): integer + """ + client = CWBotoWrapper(region_name=region) + alarms = client.describe_alarms(AlarmNamePrefix=alarmprefix) + return len(alarms.get('MetricAlarms')) + + +@click.command() +@click.option('--ignore', type=(str), multiple=True, help='RDS Instances to ignore') +@click.option('--whitelistregions', '-r', multiple=True, help='Regions to check, can be specified multiple times') +def controller(ignore, whitelistregions): + """ + Control execution of all other functions + """ + rds = rds_extractor(whitelistregions) + missing_alarm = [] + # List of RDS we don't care about + ignore_rds_list = list(ignore) + for db in rds: + if db["name"] not in ignore_rds_list: + alarms_count = cloudwatch_alarm_checker(db["name"], db["Region"]) + if alarms_count < 1: + missing_alarm.append(db["name"]) + if len(missing_alarm) > 0: + print("RDS Name") + print('\n'.join(str(p) for p in missing_alarm)) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + controller() diff --git a/util/jenkins/rds_alarms_checker/requirements.txt b/util/jenkins/rds_alarms_checker/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/rds_alarms_checker/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/requirements-cloudflare.txt b/util/jenkins/requirements-cloudflare.txt new file mode 100644 index 00000000000..9419d9fd3d1 --- /dev/null +++ b/util/jenkins/requirements-cloudflare.txt @@ -0,0 +1,18 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +certifi==2023.11.17 + # via requests +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via -r requirements/cloudflare.in +idna==3.6 + # via requests +requests==2.31.0 + # via -r requirements/cloudflare.in +urllib3==2.1.0 + # via requests diff --git a/util/jenkins/requirements.txt b/util/jenkins/requirements.txt new file mode 100644 index 00000000000..4ddf612445b --- /dev/null +++ b/util/jenkins/requirements.txt @@ -0,0 +1,134 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +amqp==5.2.0 + # via kombu +argcomplete==3.2.1 + # via yq +awscli==1.32.2 + # via -r requirements/jenkins.in +backoff==1.4.3 + # via -r requirements/jenkins.in +backports-zoneinfo[tzdata]==0.2.1 + # via + # celery + # kombu +billiard==4.2.0 + # via celery +boto==2.49.0 + # via -r requirements/jenkins.in +boto3==1.34.2 + # via -r requirements/jenkins.in +botocore==1.34.2 + # via + # awscli + # 
boto3 + # s3transfer +celery==5.3.6 + # via -r requirements/jenkins.in +certifi==2023.11.17 + # via + # opsgenie-sdk + # requests +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via + # -r requirements/jenkins.in + # celery + # click-didyoumean + # click-plugins + # click-repl +click-didyoumean==0.3.0 + # via celery +click-plugins==1.1.1 + # via celery +click-repl==0.3.0 + # via celery +colorama==0.4.4 + # via awscli +docutils==0.16 + # via awscli +idna==3.6 + # via requests +jmespath==1.0.1 + # via + # boto3 + # botocore +jq==1.6.0 + # via -r requirements/jenkins.in +kombu==5.3.4 + # via celery +opsgenie-sdk==0.3.1 + # via -r requirements/jenkins.in +prompt-toolkit==3.0.43 + # via click-repl +pyasn1==0.5.1 + # via rsa +pymysql==0.9.3 + # via -r requirements/jenkins.in +python-dateutil==2.8.2 + # via + # botocore + # celery + # opsgenie-sdk + # s3cmd +python-gnupg==0.5.2 + # via -r requirements/jenkins.in +python-magic==0.4.27 + # via s3cmd +pytz==2023.3.post1 + # via opsgenie-sdk +pyyaml==6.0.1 + # via + # -r requirements/jenkins.in + # awscli + # yq +redis==2.10.6 + # via -r requirements/jenkins.in +requests==2.31.0 + # via opsgenie-sdk +rsa==4.7.2 + # via awscli +s3cmd==2.4.0 + # via -r requirements/jenkins.in +s3transfer==0.9.0 + # via + # awscli + # boto3 +six==1.16.0 + # via + # opsgenie-sdk + # python-dateutil +splunk-sdk==1.6.16 + # via -r requirements/jenkins.in +tomlkit==0.12.3 + # via yq +typing-extensions==4.9.0 + # via kombu +tzdata==2023.3 + # via + # backports-zoneinfo + # celery +urllib3==1.26.18 + # via + # botocore + # opsgenie-sdk + # requests +vine==5.1.0 + # via + # amqp + # celery + # kombu +wcwidth==0.2.12 + # via prompt-toolkit +xmltodict==0.13.0 + # via yq +yq==3.2.3 + # via -r requirements/jenkins.in + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/util/jenkins/run-ansible.sh b/util/jenkins/run-ansible.sh new file mode 100644 index 00000000000..29f60d7caf6 --- /dev/null +++ b/util/jenkins/run-ansible.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +# A simple wrapper to run ansible from Jenkins. +# This assumes that you will be running on one or more servers +# that are tagged with Name: -- + +if [[ + -z $WORKSPACE || + -z $environment_tag || + -z $deployment_tag || + -z $play_tag || + -z $ansible_play || + -z $elb_pre_post || + -z $first_in || + -z $serial_count + ]]; then + echo "Environment incorrect for this wrapper script" + env + exit 1 +fi + +cd $WORKSPACE/configuration/playbooks + +ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post" + +if [ ! -z "$extra_vars" ]; then + for arg in $extra_vars; do + ansible_extra_vars+=" -e $arg" + done +fi + +if [[ $run_migrations == "true" ]]; then + ansible_extra_vars+=" -e migrate_db=yes" +fi + +if [[ $check_mode == "true" ]]; then + ansible_extra_vars+=" --check" +fi + +if [[ ! -z "$run_on_single_ip" ]]; then + ansible_limit+="$run_on_single_ip" +else + if [[ $first_in == "true" ]]; then + ansible_limit+="first_in_" + fi + ansible_limit+="tag_Name_${environment_tag}-${deployment_tag}-${play_tag}" +fi + +if [[ ! 
-z "$task_tags" ]]; then + ansible_task_tags+="--tags $task_tags" +fi + +if [[ -z "$ssh_user" ]]; then + ansible_ssh_user="ubuntu" +else + ansible_ssh_user="${ssh_user}" +fi + +if [[ -f ${WORKSPACE}/configuration-internal/ansible/vars/${deployment_tag}.yml ]]; then + extra_var_args+=" -e@${WORKSPACE}/configuration-internal/ansible/vars/${deployment_tag}.yml" +fi + +if [[ -f ${WORKSPACE}/configuration-internal/ansible/vars/${environment_tag}-${deployment_tag}.yml ]]; then + extra_var_args+=" -e@${WORKSPACE}/configuration-internal/ansible/vars/${environment_tag}-${deployment_tag}.yml" +fi + +if [[ -f ${WORKSPACE}/configuration-secure/ansible/vars/${deployment_tag}.yml ]]; then + extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${deployment_tag}.yml" +fi + +if [[ -f ${WORKSPACE}/configuration-secure/ansible/vars/${environment_tag}-${deployment_tag}.yml ]]; then + extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${environment_tag}-${deployment_tag}.yml" +fi + +export PYTHONUNBUFFERED=1 +env +ansible-playbook -v -D -u $ansible_ssh_user $ansible_play -i ./ec2.py $ansible_task_tags --limit $ansible_limit $extra_var_args $ansible_extra_vars diff --git a/util/jenkins/stage_release.py b/util/jenkins/stage_release.py deleted file mode 100644 index 0bd73f69627..00000000000 --- a/util/jenkins/stage_release.py +++ /dev/null @@ -1,232 +0,0 @@ -""" -Take in a YAML file with the basic data of all the things we could -deploy and command line hashes for the repos that we want to deploy -right now. - -Example Config YAML file: ---- -DOC_STORE_CONFIG: - hosts: [ list, of, mongo, hosts] - port: # - db: 'db' - user: 'jenkins' - password: 'password' - -configuration_repo: "/path/to/configuration/repo" -configuration_secure_repo: "/path/to/configuration-secure" - -repos: - edxapp: - plays: - - edxapp - - worker - xqueue: - plays: - - xqueue - 6.00x: - plays: - - xserver - xserver: - plays: - - xserver - -deployments: - edx: - - stage - - prod - edge: - - stage - - prod - loadtest: - - stage - -# A jenkins URL to post requests for building AMIs -abbey_url: "http://..../" - -# A mapping of plays to base AMIs -base_ami:{} - -# The default AMI to use if there isn't one specific to your plays. -default_base_ami: '' ---- -""" -import argparse -import json -import yaml -import logging as log -import requests -from datetime import datetime -from git import Repo -from pprint import pformat -from pymongo import MongoClient, DESCENDING - -log.basicConfig(level=log.DEBUG) - -def uri_from(doc_store_config): - """ - Convert the below structure to a mongodb uri. 
- - DOC_STORE_CONFIG: - hosts: - - 'host1.com' - - 'host2.com' - port: 10012 - db: 'devops' - user: 'username' - password: 'password' - """ - - uri_format = "mongodb://{user}:{password}@{hosts}/{db}" - host_format = "{host}:{port}" - - port = doc_store_config['port'] - host_uris = [host_format.format(host=host,port=port) for host in doc_store_config['hosts']] - return uri_format.format( - user=doc_store_config['user'], - password=doc_store_config['password'], - hosts=",".join(host_uris), - db=doc_store_config['db']) - -def prepare_release(args): - config = yaml.safe_load(open(args.config)) - mongo_uri = uri_from(config['DOC_STORE_CONFIG']) - client = MongoClient(mongo_uri) - db = client[config['DOC_STORE_CONFIG']['db']] - - # Get configuration repo versions - config_repo_ver = Repo(config['configuration_repo']).commit().hexsha - config_secure_ver = Repo(config['configuration_secure_repo']).commit().hexsha - - # Parse the vars. - var_array = map(lambda key_value: key_value.split('='), args.REPOS) - update_repos = { item[0]:item[1] for item in var_array } - log.info("Update repos: {}".format(pformat(update_repos))) - - release = {} - now = datetime.utcnow() - release['_id'] = args.release_id - release['date_created'] = now - release['date_modified'] = now - release['build_status'] = 'Unknown' - release['build_user'] = args.user - - - release_coll = db[args.deployment] - releases = release_coll.find({'build_status': 'Succeeded'}).sort('_id', DESCENDING) - all_plays = {} - - try: - last_successful = releases.next() - all_plays = last_successful['plays'] - except StopIteration: - # No successful builds. - log.warn("No Previously successful builds.") - - # For all repos that were updated - for repo, ref in update_repos.items(): - var_name = "{}_version".format(repo.replace('-','_')) - if repo not in config['repos']: - raise Exception("No info for repo with name '{}'".format(repo)) - - # For any play that uses the updated repo - for play in config['repos'][repo]: - if play not in all_plays: - all_plays[play] = {} - - if 'vars' not in all_plays[play]: - all_plays[play]['vars'] = {} - - all_plays[play]['vars'][var_name] = ref - # Configuration to use to build these AMIs - all_plays[play]['configuration_ref'] = config_repo_ver - all_plays[play]['configuration_secure_ref'] = config_secure_ver - - # Set amis to None for all envs of this deployment - all_plays[play]['amis'] = {} - for env in config['deployments'][args.deployment]: - # Check the AMIs collection to see if an ami already exist - # for this configuration. - potential_ami = ami_for(db, env, - args.deployment, - play, config_repo_ver, - config_secure_ver, - ref) - if potential_ami: - all_plays[play]['amis'][env] = potential_ami['_id'] - else: - all_plays[play]['amis'][env] = None - - release['plays'] = all_plays - if args.noop: - print("Would insert into release collection: {}".format(pformat(release))) - else: - release_coll.insert(release) - # All plays that need new AMIs have been updated. 
- notify_abbey(config, args.deployment, - all_plays, args.release_id, mongo_uri, config_repo_ver, - config_secure_ver, args.noop) - -def ami_for(db, env, deployment, play, configuration, - configuration_secure, ansible_vars): - - ami_signature = { - 'env': env, - 'deployment': deployment, - 'play': play, - 'configuration_ref': configuration, - 'configuration_secure_ref': configuration_secure, - 'vars': ansible_vars, - } - - return db.amis.find_one(ami_signature) - -def notify_abbey(config, deployment, all_plays, release_id, - mongo_uri, configuration_ref, configuration_secure_ref, noop=False): - abbey_url = config['abbey_url'] - base_amis = config['base_amis'] - default_base = config['default_base_ami'] - - for play_name, play in all_plays.items(): - for env, ami in play['amis'].items(): - if ami is None: - params = {} - params['play'] = play_name - params['deployment'] = deployment - params['environment'] = env - params['vars'] = yaml.safe_dump(play['vars'], default_flow_style=False) - params['release_id'] = release_id - params['mongo_uri'] = mongo_uri - params['configuration'] = configuration_ref - params['configuration_secure'] = configuration_secure_ref - params['base_ami'] = base_amis.get(play_name, default_base) - - log.info("Need ami for {}".format(pformat(params))) - if noop: - r = requests.Request('POST', abbey_url, params=params) - url = r.prepare().url - print("Would have posted: {}".format(url)) - else: - r = requests.post(abbey_url, params=params) - - log.info("Sent request got {}".format(r)) - if r.status_code != 200: - # Something went wrong. - msg = "Failed to submit request with params: {}" - raise Exception(msg.format(pformat(params))) - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Prepare a new release.") - parser.add_argument('-c', '--config', required=True, help="Configuration for deploys") - parser.add_argument('-u', '--user', required=True, help="User staging the release.") - msg = "The deployment to build for eg. edx, edge, loadtest" - parser.add_argument('-d', '--deployment', required=True, help=msg) - parser.add_argument('-r', '--release-id', required=True, help="Id of Release.") - parser.add_argument('-n', '--noop', action='http://github.com/store_true', - help="Run without sending requests to abbey.") - parser.add_argument('REPOS', nargs='+', - help="Any number of var=value(no spcae around '='" + \ - " e.g. 'edxapp=3233bac xqueue=92832ab'") - - args = parser.parse_args() - log.debug(args) - prepare_release(args) diff --git a/util/jenkins/virtualenv_tools.sh b/util/jenkins/virtualenv_tools.sh new file mode 100644 index 00000000000..01f51d3ae5c --- /dev/null +++ b/util/jenkins/virtualenv_tools.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# function to create a virtual environment in a directory separate from +# where it is called. Its name is predictable based on where the script +# is called +# +# . /edx/var/jenkins/jobvenvs/virtualenv_tools.sh +# create_virtualenv --python=python3.8 --clear +# . "$venvpath/bin/activate" +# +# Optional Environmental Variables: +# +# JOBVENVDIR - where on the system to create the virtualenv +# - e.g. /edx/var/jenkins/jobvenvs +# +# Reason for existence: shiningpanda, the jenkins plugin that manages our +# virtualenvironments for jenkins jobs, is no longer supported so we need +# to stop using it. 
The tricky part is shiningpanda uses virtualenvwrapper +# underneath the hood, so while we're moving jenkins jobs to python3.8 +# and beyond withOUT shiningpanda, we want to be careful to not futz with +# virtualenvwrapper environmental variables (which are required for it to +# function). Therefore, we have this separate implementation of virtualenv +# management. +# +# Why not create virtual environments right in the jenkins workspace +# where the job is run? Because workspaces are so deep in the filesystem +# that the autogenerated shebang line created by virtualenv on things in +# the virtualenv's bin directory will often be too long for the OS to +# parse. + +function create_virtualenv () { + if [ -z "${JOBVENVDIR:-}" ] + then + echo "No JOBVENVDIR found. Using default value." >&2 + JOBVENVDIR="/edx/var/jenkins/jobvenvs" + fi + + # create a unique hash for the job based location of where job is run + venvname="$(pwd | md5sum | cut -d' ' -f1)" + + # create the virtualenv + virtualenv "$@" "$JOBVENVDIR/$venvname" + + # This variable is created in global scope if function is sourced + # so we can access it after running this function. + venvpath="$JOBVENVDIR/$venvname" +} diff --git a/util/jenkins/worker-container-provisioner.sh b/util/jenkins/worker-container-provisioner.sh new file mode 100644 index 00000000000..dd38e67a4ad --- /dev/null +++ b/util/jenkins/worker-container-provisioner.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +set -ex + +# Install pre-reqs packages +function install_pre_reqs() { + YQ_VERSION="4.27.5" + wget https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_linux_amd64 -O /usr/bin/yq + chmod +x /usr/bin/yq +} + +# Render docker-compose file for celery workers +function render_docker_compose() { + # Set common environment variables and volumes for edxapp celery workers + if [ "${LC_WORKER_OF}" == "edxapp" ] ; then + worker_service_volume_mappings=("/edx/var/edxapp:/edx/var/edxapp" "/edx/etc/lms.yml:/edx/etc/lms.yml" "/edx/etc/cms.yml:/edx/etc/cms.yml" "/edx/app/${LC_WORKER_OF}/.boto:/edx/app/${LC_WORKER_OF}/.boto" "/var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock") + worker_service_env_mappings=("CONCURRENCY=1" "LOGLEVEL=info" "LANG=en_US.UTF-8" "PYTHONPATH=/edx/app/${LC_WORKER_OF}/${LC_WORKER_SERVICE_REPO}" "BOTO_CONFIG=/edx/app/${LC_WORKER_OF}/.boto" "LMS_CFG=/edx/etc/lms.yml" "STUDIO_CFG=/edx/etc/cms.yml" "CMS_CFG=/edx/etc/cms.yml") + fi + + worker_celery_path="/edx/app/${LC_WORKER_OF}/venvs/${LC_WORKER_OF}/bin/celery" + readarray worker_cfg < <(echo "${LC_WORKER_CFG}" | yq e -o=j -I=0 '.worker_cfg[]') + + cat < $1 +--- +version: "3.9" +services: +$( + for worker_config in "${worker_cfg[@]}"; do + worker_service_variant=$(echo "${worker_config}" | yq -e '.service_variant' -) + worker_queue=$(echo "${worker_config}" | yq -e '.queue' -) + worker_concurrency=$(echo "${worker_config}" | yq -e '.concurrency' -) + prefetch_optimization=$(echo "${worker_config}" | yq -e '.prefetch_optimization' -) + worker_service_name="${worker_service_variant}_${worker_queue}_${worker_concurrency}" + echo -e " ${worker_service_name}:" + echo -e " network_mode: host" + echo -e " image: ${LC_WORKER_IMAGE_NAME}:latest" + echo -e " container_name: $worker_service_name" + echo -e " user: \"www-data:www-data\"" + echo -e " command: ${worker_celery_path} --app=${worker_service_variant}.celery:APP worker --loglevel=info --queues=edx.${worker_service_variant}.core.${worker_queue} --hostname=edx.${worker_service_variant}.core.${worker_queue}.%%h 
--concurrency=${worker_concurrency} -O ${prefetch_optimization}" + echo -e " volumes:" + for volume_map in ${worker_service_volume_mappings[@]} ; do + echo -e " - ${volume_map}" + done + echo -e " environment:" + echo -e " - SERVICE_VARIANT=${worker_service_variant}" + echo -e " - DJANGO_SETTINGS_MODULE=${worker_service_variant}.envs.docker-production" + echo -e " - EDX_PLATFORM_SETTINGS=docker-production" + echo -e " - EDX_REST_API_CLIENT_NAME=edx.${worker_service_variant}.core.${worker_queue}" + for env_map in ${worker_service_env_mappings[@]} ; do + echo -e " - ${env_map}" + done + done +) +EOF +} + +install_pre_reqs + +# checkout git repo +if [ ! -d "/edx/app/${LC_WORKER_OF}" ]; then + mkdir /edx/app/${LC_WORKER_OF} +fi + +if [ ! -d "/edx/app/${LC_WORKER_OF}/${LC_WORKER_SERVICE_REPO}" ]; then + git clone https://github.com/edx/${LC_WORKER_SERVICE_REPO}.git /edx/app/${LC_WORKER_OF}/${LC_WORKER_SERVICE_REPO} + cd /edx/app/${LC_WORKER_OF}/${LC_WORKER_SERVICE_REPO} && git checkout ${LC_WORKER_SERVICE_REPO_VERSION} +fi + +# Check if docker image already exists. If it doesn't, build it. +if ! $(docker image inspect ${LC_WORKER_IMAGE_NAME}:latest >/dev/null 2>&1 && echo true || echo false) ; then + cd /edx/app/${LC_WORKER_OF}/${LC_WORKER_SERVICE_REPO} + time DOCKER_BUILDKIT=1 docker build . -t ${LC_WORKER_IMAGE_NAME}:latest --target base +fi + +# Render a docker-compose file for workers +render_docker_compose "/home/$LC_SANDBOX_USER/docker-compose-${LC_WORKER_OF}-workers.yaml" + +# Run the docker-compose file +docker-compose -f "/home/$LC_SANDBOX_USER/docker-compose-${LC_WORKER_OF}-workers.yaml" up -d diff --git a/util/json_lint.sh b/util/json_lint.sh deleted file mode 100755 index 5f64984d37c..00000000000 --- a/util/json_lint.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# A very simple check to see if the json files in the project at least compile. -# If they do not, a cryptic message that might be helpful is produced. - - -# Save current directory so we can come back; change to repo root -STARTED_FROM=`pwd` -cd $(git rev-parse --show-toplevel) - -# Do very basic syntax check of every json file to make sure it's valid format -for file in `find . -iname '*.json'`; do - cat $file | python -m json.tool 1>/dev/null 2>json_complaint.err; - retval=$? - if [ $retval != 0 ]; then - echo "JSON errors in $file" - cat json_complaint.err - rm -f json_complaint.err - cd $STARTED_FROM - exit $retval; - fi -done - -# Everything went ok! 
-rm -f json_complaint.err
-cd $STARTED_FROM
-exit 0
diff --git a/util/maintenance.sh b/util/maintenance.sh
new file mode 100755
index 00000000000..097cd1c230d
--- /dev/null
+++ b/util/maintenance.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+usage() {
+    echo "Usage: $0 environment-deploy (enable|disable)"
+    echo
+    echo "Examples:"
+    echo "  Turn on maintenance page for stage-edx:  $0 stage-edx enable"
+    echo "  Turn off maintenance page for stage-edx: $0 stage-edx disable"
+    exit 1
+}
+
+ED=$1
+ENABLE_ARG=$2
+
+case $ED in
+    stage-edx|prod-edx|prod-edge)
+        ;;
+    *)
+        echo "ERROR: environment-deploy must be one of stage-edx, prod-edx or prod-edge"
+        echo
+        usage
+        ;;
+esac
+
+case $ENABLE_ARG in
+    enable)
+        ENABLE="True"
+        ;;
+    disable)
+        ENABLE="False"
+        ;;
+    *)
+        echo "ERROR: must specify enable or disable"
+        echo
+        usage
+        ;;
+esac
+
+INVENTORY=$(aws ec2 describe-instances --filter "Name=tag:Name,Values=${ED}-edxapp,${ED}-studio,${ED}-worker" --query 'Reservations[].Instances[].PrivateIpAddress' --output text | tr '\t' ',')
+ENABLE_EXTRA_VAR="{\"ENABLE_MAINTENANCE\": ${ENABLE}}"
+
+ansible-playbook ./edx_maintenance.yml -i "${INVENTORY}," -e "${ENABLE_EXTRA_VAR}"
diff --git a/util/old/import_xml_courses.py b/util/old/import_xml_courses.py
new file mode 100644
index 00000000000..7a7cdd66779
--- /dev/null
+++ b/util/old/import_xml_courses.py
@@ -0,0 +1,66 @@
+# Import XML courses from git repos into the CMS.
+# Run with sudo and make sure the user can clone
+# the course repos.
+
+# Output has, per course:
+# {
+#   repo_url:
+#   repo_name:
+#   org:
+#   course:
+#   run:
+#   disposition:
+#   version:
+# }
+
+import argparse
+from os.path import basename
+import yaml
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="Import XML courses from git repos.")
+    parser.add_argument("-c", "--courses-csv", required=True,
+                        help="A CSV of xml courses to import.")
+    args = parser.parse_args()
+
+    courses = open(args.courses_csv)
+
+    all_course_data = []
+    all_xml_mappings = {}
+    for line in courses:
+        cols = line.strip().split(',')
+        slug = cols[0]
+        author_format = cols[1]
+        disposition = cols[2]
+        repo_url = cols[4]
+        version = cols[5]
+
+        if author_format.lower() != 'xml' \
+                or disposition.lower() == "don't import":
+            continue
+
+        # Checkout w/tilde
+        org, course, run = slug.split("/")
+        # Strip only a trailing ".git" suffix from the repo basename
+        repo_base = basename(repo_url)
+        if repo_base.endswith('.git'):
+            repo_base = repo_base[:-len('.git')]
+        repo_name = "{}~{}".format(repo_base, run)
+
+        course_info = {
+            "repo_url": repo_url,
+            "repo_name": repo_name,
+            "org": org,
+            "course": course,
+            "run": run,
+            "disposition": disposition.lower(),
+            "version": version,
+        }
+        all_course_data.append(course_info)
+
+        if disposition.lower() == "on disk":
+            all_xml_mappings[slug] = 'xml'
+
+    edxapp_xml_courses = {
+        "EDXAPP_XML_COURSES": all_course_data,
+        "EDXAPP_XML_MAPPINGS": all_xml_mappings,
+        "EDXAPP_XML_FROM_GIT": True
+    }
+    print(yaml.safe_dump(edxapp_xml_courses, default_flow_style=False))
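A sketch of the CSV row format this script consumes, inferred from the column indices above; the slug and repo URL are made-up examples, and column index 3 is never read:

    echo "MITx/6.002x/2012_Fall,xml,on disk,unused,https://github.com/example/6.002x.git,master" > courses.csv
    sudo python import_xml_courses.py --courses-csv courses.csv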
diff --git a/util/parsefiles.py b/util/parsefiles.py
new file mode 100644
index 00000000000..de1184d7c11
--- /dev/null
+++ b/util/parsefiles.py
@@ -0,0 +1,468 @@
+import os
+import pathlib2
+import logging
+import yaml
+import sys
+import networkx as nx
+from collections import namedtuple
+import argparse
+import six
+
+TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
+DOCKER_PATH_ROOT = pathlib2.Path(TRAVIS_BUILD_DIR, "docker", "build")
+DOCKER_PLAYS_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "docker", "plays")
+CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
+LOGGER = logging.getLogger(__name__)
+
+
+def build_graph(git_dir, roles_dirs, aws_play_dirs, docker_play_dirs):
+    """
+    Builds a dependency graph that shows relationships between roles and playbooks.
+    An edge [A, B], where A and B are roles, signifies that A depends on B. An edge
+    [C, D], where C is a playbook and D is a role, signifies that C uses D.
+
+    Input:
+    git_dir: A path to the top-most directory of the local git repository the tool is run in.
+    roles_dirs: A list of relative paths to directories in which Ansible roles reside.
+    aws_play_dirs: A list of relative paths to directories in which AWS Ansible playbooks reside.
+    docker_play_dirs: A list of relative paths to directories in which Docker Ansible playbooks reside.
+
+    """
+
+    graph = nx.DiGraph()
+
+    _map_roles_to_roles(graph, roles_dirs, git_dir, "dependencies", "role", "role")
+    _map_plays_to_roles(graph, aws_play_dirs, git_dir, "roles", "aws_playbook", "role")
+    _map_plays_to_roles(graph, docker_play_dirs, git_dir, "roles", "docker_playbook", "role")
+
+    return graph
+
+def _map_roles_to_roles(graph, dirs, git_dir, key, type_1, type_2):
+    """
+    Maps roles to the roles that they depend on.
+
+    Input:
+    graph: A networkx digraph that is used to map Ansible dependencies.
+    dirs: A list of relative paths to directories in which Ansible roles reside.
+    git_dir: A path to the top-most directory of the local git repository the tool is run in.
+    key: The key in a role yaml file in dirs that maps to relevant role data. In this case, key is
+    "dependencies", because a role's dependent roles are of interest.
+    type_1: Given edges A-B, the type of node A.
+    type_2: Given edges A-B, the type of node B.
+    Since this function maps roles to their dependent roles, both type_1 and type_2 are "role".
+    """
+
+    Node = namedtuple('Node', ['name', 'type'])
+
+    # for each role directory
+    for d in dirs:
+        d = pathlib2.Path(git_dir, d)
+
+        # for all files/sub-directories in directory
+        for item in d.iterdir():
+
+            # attempts to find meta/*.yml file in item directory tree
+            roles = {f for f in item.glob("meta/*.yml")}
+
+            # if a meta/*.yml file(s) exists for a role
+            if roles:
+                # for each role
+                for role in roles:
+                    yaml_file = _open_yaml_file(role)
+
+                    # if not an empty yaml file and key in file
+                    if yaml_file is not None and key in yaml_file:
+                        # for each dependent role; yaml_file["dependencies"] returns list of
+                        # dependent roles
+                        for dependent in yaml_file[key]:
+                            # get role name of each dependent role
+                            name = _get_role_name(dependent)
+
+                            # add node for type_1, typically role
+                            node_1 = Node(item.name, type_1)
+
+                            # add node for type_2, typically dependent role
+                            node_2 = Node(name, type_2)
+
+                            # add edge, typically dependent role - role
+                            graph.add_edge(node_2, node_1)
+
+def _map_plays_to_roles(graph, dirs, git_dir, key, type_1, type_2):
+    """
+    Maps plays to the roles they use.
+
+    Input:
+    graph: A networkx digraph that is used to map Ansible dependencies.
+    dirs: A list of relative paths to directories in which Ansible playbooks reside.
+    git_dir: A path to the top-most directory of the local git repository the tool is run in.
+    key: The key in a playbook yaml file in dirs that maps to relevant playbook data. In this case, key is
+    "roles", because the roles used by a playbook are of interest.
+    type_1: Given edges A-B, the type of node A.
+    type_2: Given edges A-B, the type of node B.
+    Since this function maps plays to the roles they use, type_1 is a type of playbook and type_2 is "role".
+    """
+
+    Node = namedtuple('Node', ['name', 'type'])
+
+    # for each play directory
+    for d in dirs:
+        d = pathlib2.Path(git_dir, d)
+
+        # for all files/sub-directories in directory
+        for item in d.iterdir():
+
+            # if item is a file ending in .yml
+            if item.match("*.yml"):
+                # open .yml file for playbook
+                yaml_file = _open_yaml_file(item)
+
+                # if not an empty yaml file
+                if yaml_file is not None:
+                    # for each play in yaml file
+                    for play in yaml_file:
+                        # if specified key in yaml file (e.g. "roles")
+                        if key in play:
+                            # for each role
+                            for role in play[key]:
+                                # get role name
+                                name = _get_role_name(role)
+
+                                # add node for type_1, typically for playbook
+                                node_1 = Node(item.stem, type_1)
+
+                                # add node for type_2, typically for role
+                                node_2 = Node(name, type_2)
+
+                                # add edge, typically role - playbook that uses it
+                                graph.add_edge(node_2, node_1)
+
+def _open_yaml_file(file_str):
+    """
+    Opens yaml file.
+
+    Input:
+    file_str: The path to the yaml file to be opened.
+    """
+
+    with (file_str.open(mode='r')) as file:
+        try:
+            yaml_file = yaml.safe_load(file)
+            return yaml_file
+        except yaml.YAMLError as exc:
+            LOGGER.error("error in configuration file: %s" % str(exc))
+            sys.exit(1)
+
+
+def change_set_to_roles(files, git_dir, roles_dirs, playbooks_dirs, graph):
+    """
+    Converts a change set consisting of a number of files to the roles that they represent/contain.
+
+    Input:
+    files: A list of files modified by a commit range.
+    git_dir: A path to the top-most directory of the local git repository the tool is run in.
+    roles_dirs: A list of relative paths to directories in which Ansible roles reside.
+    playbooks_dirs: A list of relative paths to directories in which Ansible playbooks reside.
+    graph: A networkx digraph that is used to map Ansible dependencies.
+    """
+
+    # set of roles
+    items = set()
+
+    # for all directories containing roles
+    for role_dir in roles_dirs:
+        role_dir_path = pathlib2.Path(git_dir, role_dir)
+
+        # get all files in the directories containing roles (i.e. all the roles in that directory)
+        candidate_files = {f for f in role_dir_path.glob("**/*")}
+
+        # for all the files in the change set
+        for f in files:
+            file_path = pathlib2.Path(git_dir, f)
+
+            # if the change set file is in the set of role files
+            if file_path in candidate_files:
+                # get name of role and add it to set of roles of the change set
+                items.add(_get_role_name_from_file(file_path))
+    return items
+
+
+def get_plays(files, git_dir, playbooks_dirs):
+    """
+    Determines which files in the change set are aws playbooks.
+
+    files: A list of files modified by a commit range.
+    git_dir: A path to the top-most directory of the local git repository the tool is run in.
+    playbooks_dirs: A list of relative paths to directories in which Ansible playbooks reside.
+
+    """
+
+    plays = set()
+
+    # for all directories containing playbooks
+    for play_dir in playbooks_dirs:
+        play_dir_path = pathlib2.Path(git_dir, play_dir)
+
+        # get all files in directory containing playbook that end with yml extension
+        # (i.e. all playbooks in that directory)
+        candidate_files = {f for f in play_dir_path.glob("*.yml")}
+
+        # for all files in the change set
+        for f in files:
+            file_path = pathlib2.Path(git_dir, f)
+
+            # if the change set file is in the set of playbook files
+            if file_path in candidate_files:
+                plays.add(_get_playbook_name_from_file(file_path))
+
+    return plays
+
+
+def _get_playbook_name_from_file(path):
+    """
+    Gets the name of a playbook from the filepath, which is the last part of the filepath.
+
+    Input:
+    path: A path to the playbook
+    """
+    # get last part of filepath
+    return path.stem
+
+
+def _get_role_name_from_file(path):
+    """
+    Gets the name of a role from the filepath, which is the directory following the occurrence of the word "roles".
+
+    Input:
+    path: A path to the role
+    """
+    # get individual parts of a file path
+    dirs = path.parts
+
+    # name of role is the next part of the file path after "roles"
+    return dirs[dirs.index("roles")+1]
+
+
+def get_dependencies(roles, graph):
+    """
+    Determines all roles dependent on a set of roles and returns a set containing both.
+
+    Input:
+    roles: A set of roles.
+    graph: A networkx digraph that is used to map Ansible dependencies.
+    """
+
+    items = set()
+
+    for role in roles:
+        # add the role itself
+        items.add(role)
+
+        # add all the roles that depend on the role
+        dependents = nx.descendants(graph, (role, "role"))
+
+        items |= {dependent.name for dependent in dependents}
+
+    return items
+
+
+def get_docker_plays(roles, graph):
+    """Gets all docker plays that have at least one role in common with roles."""
+
+    # dict to determine coverage of plays
+    coverage = dict.fromkeys(roles, False)
+
+    items = set()
+
+    docker_plays = {node.name for node in graph.nodes() if node.type == "docker_playbook"}
+
+    for play in docker_plays:
+        # all roles that are used by play
+        roles_nodes = nx.all_neighbors(graph, (play, "docker_playbook"))
+
+        docker_roles = {role.name for role in roles_nodes}
+
+        # compares roles and docker roles
+        common_roles = roles & docker_roles
+
+        # if their intersection is non-empty, add the docker play
+        if common_roles:
+            items.add(play)
+
+            # each aws role that was in common is marked as being covered by a docker play
+            for role in common_roles:
+                coverage[role] = True
+
+    # check coverage of roles
+    for role in coverage:
+        if not coverage[role]:
+            LOGGER.warning("role '%s' is not covered." % role)
+
+    return items
+
+
+def filter_docker_plays(plays, repo_path):
+    """Filters out docker plays that do not have a Dockerfile."""
+
+    items = set()
+
+    for play in plays:
+        dockerfile = pathlib2.Path(DOCKER_PATH_ROOT, play, "Dockerfile")
+
+        if dockerfile.exists():
+            items.add(play)
+        else:
+            LOGGER.warning("covered playbook '%s' does not have Dockerfile." % play)
+
+    return items
+
+
+def _get_role_name(role):
+    """
+    Resolves a role name from either a simple declaration or a dictionary style declaration.
+
+    A simple declaration would look like:
+    - foo
+
+    A dictionary style declaration would look like:
+    - role: rbenv
+      rbenv_user: "{{ forum_user }}"
+      rbenv_dir: "{{ forum_app_dir }}"
+      rbenv_ruby_version: "{{ FORUM_RUBY_VERSION }}"
+
+    :param role:
+    :return:
+    """
+    if isinstance(role, dict):
+        return role['role']
+    elif isinstance(role, str):
+        return role
+    else:
+        LOGGER.warning("role %s could not be resolved to a role name." % role)
+        return None
+
+
+def _get_modified_dockerfiles(files, git_dir):
+    """
+    Returns the plays whose files changed under the docker/build directory.
+    :param files:
+    :param git_dir:
+    :return:
+    """
+    items = set()
+    candidate_files = {f for f in DOCKER_PATH_ROOT.glob("**/*")}
+    for f in files:
+        file_path = pathlib2.Path(git_dir, f)
+        if file_path in candidate_files:
+            # _get_play_name returns None when no play name can be extracted,
+            # so only add resolved names to the set
+            play = _get_play_name(file_path)
+            if play is not None:
+                items.add(play)
+
+    return items
+
+
+def get_modified_dockerfiles_plays(files, git_dir):
+    """
+    Returns the plays whose files changed under the docker/plays directory.
+    :param files:
+    :param git_dir:
+    :return:
+    """
+    items = set()
+    candidate_files = {f for f in DOCKER_PLAYS_PATH.glob("*.yml")}
+    for f in files:
+        file_path = pathlib2.Path(git_dir, f)
+        if file_path in candidate_files:
+            items.add(_get_playbook_name_from_file(file_path))
+    return items
+
+
+def _get_play_name(path):
+
+    """
+    Gets the name of a play from the filepath, which is the path component
+    immediately after "docker/build".
+
+    Input:
+    path: A path to the changed file under the docker/build dir
+    """
+
+    # attempt to extract Docker image name from file path; splits the path of a file over
+    # "docker/build/", because the first token after "docker/build/" is the image name
+    suffix = (str(path)).split(str(os.path.join('docker', 'build', '')))
+
+    # if file path contains "docker/build/"
+    if len(suffix) > 1:
+        # split the suffix into its file path components
+        suffix_parts = suffix[1].split(os.sep)
+        # first token will be image name; /docker/build//...
+        return suffix_parts[0]
+    return None
+
+
+def arg_parse():
+
+    parser = argparse.ArgumentParser(description='Given a commit range, analyze Ansible dependencies between roles and playbooks '
+                                                 'and output a list of Docker plays affected by this commit range via these dependencies.')
+    parser.add_argument('--verbose', help="set warnings to be displayed", action="http://github.com/store_true")
+
+    return parser.parse_args()
+
+if __name__ == '__main__':
+
+    args = arg_parse()
+
+    # configure logging
+    logging.basicConfig()
+
+    if not args.verbose:
+        logging.disable(logging.WARNING)
+
+    # set of modified files in the commit range
+    change_set = set()
+
+    # read from standard in
+    for line in sys.stdin:
+        change_set.add(line.rstrip())
+
+    # configuration file is expected to be in the following format:
+    #
+    # roles_paths:
+    #   -
+    # aws_plays_paths:
+    #   -
+    # docker_plays_paths:
+    #   -
+
+    # read config file
+    config = _open_yaml_file(CONFIG_FILE_PATH)
+
+    # build graph
+    graph = build_graph(TRAVIS_BUILD_DIR, config["roles_paths"], config["aws_plays_paths"], config["docker_plays_paths"])
+
+    # gets any playbooks in the commit range
+    plays = get_plays(change_set, TRAVIS_BUILD_DIR, config["aws_plays_paths"])
+
+    # transforms the change set into the set of roles it touches
+    roles = change_set_to_roles(change_set, TRAVIS_BUILD_DIR, config["roles_paths"], config["aws_plays_paths"], graph)
+
+    # expands roles set to include roles that are dependent on existing roles
+    dependent_roles = get_dependencies(roles, graph)
+
+    # determine which docker plays cover at least one role
+    docker_plays = get_docker_plays(dependent_roles, graph)
+
+    docker_plays = docker_plays | plays
+
+    # filter out docker plays without a Dockerfile
+    docker_plays = filter_docker_plays(docker_plays, TRAVIS_BUILD_DIR)
+
+    # add plays whose Dockerfile has been modified
+    modified_docker_files = _get_modified_dockerfiles(change_set, TRAVIS_BUILD_DIR)
+
+    # add plays which got changed in the docker/plays directory
+    docker_plays_dir = get_modified_dockerfiles_plays(change_set, TRAVIS_BUILD_DIR)
+
+    all_plays = set(docker_plays) | set(modified_docker_files) | set(docker_plays_dir)
+
+    print(" ".join(all_plays))
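A usage sketch; the commit range is illustrative. The script reads the changed-file list from stdin and expects TRAVIS_BUILD_DIR to point at the repository root:

    export TRAVIS_BUILD_DIR="$(git rev-parse --show-toplevel)"
    git diff --name-only HEAD~5..HEAD | python util/parsefiles.py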
diff --git a/util/parsefiles_config.yml b/util/parsefiles_config.yml
new file mode 100644
index 00000000000..99c6ed93aab
--- /dev/null
+++ b/util/parsefiles_config.yml
@@ -0,0 +1,47 @@
+roles_paths:
+  - playbooks/roles
+aws_plays_paths:
+  - playbooks
+docker_plays_paths:
+  - docker/plays
+docker_ignore_list:
+  - go-agent
+  - go-agent-marketing
+  - go-agent-frontend
+weights:
+  - discovery: 6
+  - go-server: 5
+  - xqwatcher: 3
+  - analytics_api: 1
+  - edxapp: 28
+  - insights: 4
+  - credentials: 8
+  - forum: 7
+  - nginx: 1
+  - xqueue: 2
+  - trusty-common: 5
+  - xenial-common: 6
+  - bionic-common: 6
+  - ecommerce: 6
+  - rabbitmq: 2
+  - automated: 1
+  - mysql: 2
+  - elasticsearch: 7
+  - docker-tools: 3
+  - tools_jenkins: 8
+  - ecomworker: 4
+  - notes: 2
+  - mongo: 1
+  - analytics_pipeline: 8
+  - analytics_pipeline_hadoop_datanode: 2
+  - analytics_pipeline_hadoop_namenode: 3
+  - analytics_pipeline_hadoop_nodemanager: 3
+  - analytics_pipeline_hadoop_resourcemanager: 2
+  - analytics_pipeline_spark_master: 1
+  - analytics_pipeline_spark_worker: 1
+  - chrome: 1
+  - firefox: 1
+  - flower: 1
+  - registrar: 3
+  - designer: 3
+  - enterprise_catalog: 3
diff --git a/util/pingdom/.gitignore b/util/pingdom/.gitignore
new file mode 100644
index 00000000000..74f17a1b904
--- /dev/null
+++ b/util/pingdom/.gitignore
@@ -0,0 +1 @@
+/.Python
diff --git a/util/pingdom/README.rst b/util/pingdom/README.rst
new file mode 100644
index 00000000000..f1fcd49bb87
--- /dev/null
+++ b/util/pingdom/README.rst
@@ -0,0 +1,21 @@
+Pingdom check creation utility
+******************************
+
+To use:
+
+.. code-block:: bash
+
+    $ virtualenv -p /usr/bin/python .Python
+    $ source .Python/bin/activate
+    $ pip install -r requirements.txt
+
+    # Put the export commands in your bashrc or something similar
+    $ export PINGDOM_EMAIL=MY_USERNAME
+    $ export PINGDOM_PASSWORD=MY_PASSWORD
+    $ export PINGDOM_API_KEY=MY_API_KEY
+
+    $ python create_pingdom_alerts.py --alert-config-file ~/my-config-file
+
+There is an example config file, ``example.yml``, located in this directory.
+
+
diff --git a/util/pingdom/create_pingdom_alerts.py b/util/pingdom/create_pingdom_alerts.py
new file mode 100644
index 00000000000..0140a79cfbd
--- /dev/null
+++ b/util/pingdom/create_pingdom_alerts.py
@@ -0,0 +1,233 @@
+import json
+import click
+import yaml
+import requests
+
+
+class PingdomInvalidResponse(Exception):
+    pass
+
+
+@click.command()
+@click.option('--noop', is_flag=True, help="Don't apply changes to Pingdom.")
+@click.option('--pingdom-email', required=True,
+              help='Email to use to speak with Pingdom.',
+              envvar='PINGDOM_EMAIL')
+@click.option('--pingdom-password', required=True,
+              help='Password to use to speak with Pingdom.',
+              envvar='PINGDOM_PASSWORD')
+@click.option('--pingdom-api-key', required=True,
+              help='API Key to use to speak with Pingdom.',
+              envvar='PINGDOM_API_KEY')
+@click.option('--alert-config-file', required=True,
+              help="path to config file",
+              envvar='ALERT_CONFIG_FILE')
+def main(noop, pingdom_email, pingdom_password,
+         pingdom_api_key,
+         alert_config_file):
+    with open(alert_config_file) as stream:
+        config_file_content = yaml.safe_load(stream)
+    config_file_content = replace_user_names_with_userids(pingdom_email,
+                                                          pingdom_password,
+                                                          pingdom_api_key,
+                                                          config_file_content)
+
+    config_file_content = integration_names_to_ids(config_file_content)
+    check_for_update, checks_by_hostname = build_checks_by_hostname(pingdom_email,
+                                                                    pingdom_password,
+                                                                    pingdom_api_key)
+    for alert_config in config_file_content['checks']:
+        if (alert_config['name'], alert_config['host']) not in checks_by_hostname.items():
+            # Create new check
+            if noop:
+                print("Would CREATE: {}, but you set the noop flag.".format(
+                    alert_config))
+            else:
+                print(f"CREATE: {alert_config}")
+                create_check(pingdom_email, pingdom_password,
+                             pingdom_api_key, alert_config)
+
+        else:
+            # Updating existing check
+            existing_check = check_for_update[alert_config['name']]
+            if noop:
+                print("""
+                Has changes, would UPDATE: {},
+                but you set the noop flag.
+                """.format(alert_config))
+            else:
+                print(f"Attempting UPDATE: {alert_config}")
+                # We always update because the parameters to POST check
+                # and the parameters returned by GET check differ.
+                # It would be difficult to figure out if changes
+                # have occurred.
+                update_check(pingdom_email, pingdom_password,
+                             pingdom_api_key, existing_check['id'],
+                             alert_config)
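A dry-run sketch using the options defined above; the credentials are placeholders:

    export PINGDOM_EMAIL=ops@example.com
    export PINGDOM_PASSWORD='not-a-real-password'
    export PINGDOM_API_KEY='not-a-real-key'
    # --noop prints the CREATE/UPDATE calls that would be made without touching Pingdom
    python create_pingdom_alerts.py --noop --alert-config-file example.yml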
+
+
+def replace_user_names_with_userids(pingdom_email,
+                                    pingdom_password,
+                                    pingdom_api_key,
+                                    config_file_content):
+
+    user_ids_by_name = build_userid_by_name(
+        pingdom_email, pingdom_password, pingdom_api_key)
+    for alert in config_file_content['checks']:
+        user_ids = []
+        if 'users' in alert:
+            for user in alert['users']:
+                if 'userids' in alert:
+                    user_ids.extend(
+                        [x.strip() for x in alert['userids'].split(',')])
+                if user not in user_ids_by_name:
+                    raise PingdomInvalidResponse(
+                        f'Pingdom has no user with the name {user}')
+                user_id = user_ids_by_name[user]
+                user_ids.append(user_id)
+            del alert['users']
+            alert['userids'] = ','.join(map(str, user_ids))
+    return config_file_content
+
+
+def integration_names_to_ids(config_file_content):
+    integration_ids_by_name = config_file_content['integration_name_to_id_map']
+    for alert in config_file_content['checks']:
+        integration_ids = []
+        if 'integrations' in alert:
+            for integration in alert['integrations']:
+                if 'integrationids' in alert:
+                    integration_ids.extend(
+                        alert['integrationids'].split(','))
+                if integration not in list(integration_ids_by_name.keys()):
+                    print(
+                        """
+                        You specified an integration
+                        that does not exist in
+                        our map.
+                        """)
+                    print(
+                        """
+                        You may just need to add it to the
+                        build_integrations_by_name method;
+                        pingdom does not have an API for this presently...
+                        """)
+                    exit(1)
+                integration_id = integration_ids_by_name[integration]
+                integration_ids.append(integration_id)
+            del alert['integrations']
+            alert['integrationids'] = ','.join(map(str, integration_ids))
+    return config_file_content
+
+
+def create_check(pingdom_email, pingdom_password, pingdom_api_key, payload):
+    try:
+        response = requests.post("https://api.pingdom.com/api/2.1/checks",
+                                 headers={
+                                     'app-key': pingdom_api_key
+                                 },
+                                 auth=(pingdom_email, pingdom_password),
+                                 params=payload)
+        response.raise_for_status()
+        print("Create successful")
+    except requests.exceptions.HTTPError:
+        print_error_prefix()
+        print_request_and_response(response)
+        exit(1)
+    return json.loads(response.content.decode('utf-8'))
+
+
+def update_check(pingdom_email, pingdom_password,
+                 pingdom_api_key, id, payload):
+    if 'type' in payload:
+        del payload['type']
+    try:
+        url = f"https://api.pingdom.com/api/2.1/checks/%7Bid%7D"
+        response = requests.put(url,
+                                headers={
+                                    'app-key': pingdom_api_key
+                                },
+                                auth=(pingdom_email, pingdom_password),
+                                params=payload)
+        response.raise_for_status()
+        print("Update successful")
+    except requests.exceptions.HTTPError:
+        print_error_prefix()
+        print_request_and_response(response)
+        exit(1)
+    return json.loads(response.content.decode('utf-8'))
+
+
+def list_checks(pingdom_email, pingdom_password, pingdom_api_key):
+    try:
+        response = requests.get("https://api.pingdom.com/api/2.1/checks",
+                                headers={
+                                    'app-key': pingdom_api_key
+                                },
+                                auth=(pingdom_email, pingdom_password))
+        response.raise_for_status()
+    except requests.exceptions.HTTPError:
+        print_error_prefix()
+        print_request_and_response(response)
+        exit(1)
+    return json.loads(response.content.decode('utf-8'))['checks']
+
+
+def list_users(pingdom_email, pingdom_password, pingdom_api_key):
+    try:
+        response = requests.get("https://api.pingdom.com/api/2.1/users",
+                                headers={
+                                    'app-key': pingdom_api_key
+                                },
+                                auth=(pingdom_email, pingdom_password))
+        response.raise_for_status()
+    except requests.exceptions.HTTPError:
print_error_prefix() + print_request_and_response(response) + exit(1) + return json.loads(response.content.decode('utf-8')) + + +def build_checks_by_hostname(pingdom_email, pingdom_password, pingdom_api_key): + checks = list_checks(pingdom_email, pingdom_password, pingdom_api_key) + checks_by_hostname = {} + check_for_update = {} + for check in checks: + check_for_update[str(check['name'])] = check + checks_by_hostname[str(check['name'])] = str(check['hostname']) + return check_for_update, checks_by_hostname + + +def build_userid_by_name(pingdom_email, pingdom_password, pingdom_api_key): + user_content = list_users( + pingdom_email, pingdom_password, pingdom_api_key) + users = user_content['users'] + user_ids_by_name = {} + for user in users: + user_ids_by_name[user['name'].strip()] = user['id'] + return user_ids_by_name + + +def print_request_and_response(response): + print("Request:") + for key in response.request.headers: + print(f"{key}: {response.request.headers[key]}") + print("") + print(response.request.body) + print("------------------") + print("Response:") + for key in response.headers: + print(f"{key}: {response.headers[key]}") + print("") + print(response.content.decode('utf-8')) + print("------------------") + + +def print_error_prefix(): + print("Got error from pingdom, dumping request/response:") + + +if __name__ == "__main__": + main() diff --git a/util/pingdom/example.yml b/util/pingdom/example.yml new file mode 100644 index 00000000000..55abbcb929e --- /dev/null +++ b/util/pingdom/example.yml @@ -0,0 +1,27 @@ +checks: + - name: "Cory test alert" + host: "corylee.io" + type: "http" + encryption: false + users: + - "Pager" + - name: "google HTTPS" + host: "google.com" + type: "http" + encryption: true + # Translated to userids for the API call, looked up via the API + users: + - "Pager" + # Translated to integrationids for the API call, looked up from the integration_name_to_id_map + # because no API endpoint exists + integrations: + - "Opsgenie" + +# Names to id map for integrations +integration_name_to_id_map: + Opsgenie: 1 + Learner - OpsGenie: 2 + api.opsgenie.com: 3 + + + diff --git a/util/pingdom/requirements.txt b/util/pingdom/requirements.txt new file mode 100644 index 00000000000..3e9b2a4bbba --- /dev/null +++ b/util/pingdom/requirements.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +certifi==2023.11.17 + # via requests +charset-normalizer==3.3.2 + # via requests +click==6.7 + # via -r requirements/pingdom.in +idna==3.6 + # via requests +pyyaml==6.0.1 + # via -r requirements/pingdom.in +requests==2.31.0 + # via -r requirements/pingdom.in +six==1.14.0 + # via -r requirements/pingdom.in +urllib3==2.1.0 + # via requests diff --git a/util/post-pip-compile.sh b/util/post-pip-compile.sh new file mode 100755 index 00000000000..bca05221c37 --- /dev/null +++ b/util/post-pip-compile.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +# Remove any cruft from a requirements file generated by pip-tools which we don't want to keep + +function show_help { + echo "Usage: post-pip-compile.sh file ..." + echo "Remove any cruft left behind by pip-compile in the given requirements file(s)." + echo "" + echo "Updates the instructions for re-generating each requirements file, and removes" + echo "\"-e\" prefixes which were added to GitHub URLs only so that pip-compile could" + echo "process them correctly." 
+}
+
+function clean_file {
+    FILE_PATH=$1
+    TEMP_FILE=${FILE_PATH}.tmp
+    # Replace the instructions for regenerating the output file.
+    sed "s/pip-compile --output-file.*/make upgrade/" ${FILE_PATH} > ${TEMP_FILE}
+    mv ${TEMP_FILE} ${FILE_PATH}
+}
+
+for i in "$@"; do
+    case ${i} in
+        -h|--help)
+            # help or unknown option
+            show_help
+            exit 0
+            ;;
+        *)
+            clean_file ${i}
+            ;;
+    esac
+done
diff --git a/util/publish_rds_logs_to_cloudwatch.py b/util/publish_rds_logs_to_cloudwatch.py
new file mode 100755
index 00000000000..a36972c54f1
--- /dev/null
+++ b/util/publish_rds_logs_to_cloudwatch.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python3
+"""
+Publish RDS logs to CloudWatch
+Example:
+    ./publish_rds_logs_to_cloudwatch.py --db_engine mysql --db_identifier edx-mysql-db
+    ./publish_rds_logs_to_cloudwatch.py --db_engine aurora --db_identifier edx-aurora-cluster
+
+"""
+import boto3
+import argparse
+
+def get_client():
+
+    rds_client = boto3.client('rds')
+    return rds_client
+
+def publish_rds_logs_to_cloudwatch(db_engine,db_identifier,logs_to_publish):
+
+    client = get_client()
+    try:
+        if db_engine == "mysql":
+            response = client.modify_db_instance(
+                DBInstanceIdentifier=db_identifier,
+                CloudwatchLogsExportConfiguration={
+                    'EnableLogTypes': [
+                        logs_to_publish
+                    ]
+                }
+            )
+            if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
+                id=response["DBInstance"]["DBInstanceIdentifier"]
+                logs_exports_to_cloudwatch=response["DBInstance"]["EnabledCloudwatchLogsExports"]
+                print("RDS MySQL DB {} logs {} are now enabled for export to CloudWatch" \
+                      .format(id,logs_exports_to_cloudwatch))
+        elif db_engine == "aurora":
+            response = client.modify_db_cluster(
+                DBClusterIdentifier=db_identifier,
+                CloudwatchLogsExportConfiguration={
+                    'EnableLogTypes':[
+                        logs_to_publish
+                    ]
+                }
+            )
+            if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
+                id=response["DBCluster"]["DBClusterIdentifier"]
+                logs_exports_to_cloudwatch=response["DBCluster"]["EnabledCloudwatchLogsExports"]
+                print("RDS Aurora Cluster {} logs {} are now enabled for export to CloudWatch" \
+                      .format(id,logs_exports_to_cloudwatch))
+        else:
+            print("db_engine valid options are: mysql or aurora")
+            exit()
+    except Exception as e:
+        print(e)
+
+if __name__=="__main__":
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--db_engine', help='RDS engine: mysql or aurora',required=True)
+    parser.add_argument('--db_identifier', help='RDS instance ID',required=True)
+    parser.add_argument('--logs_to_publish',help='Logs to export to cloudwatch',default='error')
+
+    args = parser.parse_args()
+    publish_rds_logs_to_cloudwatch(args.db_engine,args.db_identifier,args.logs_to_publish)
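A usage sketch; the cluster identifier comes from the docstring example, AWS credentials are assumed to be in the environment, and slowquery is one of the MySQL-family log types RDS accepts:

    # Export Aurora slow query logs instead of the default error log
    ./publish_rds_logs_to_cloudwatch.py --db_engine aurora \
        --db_identifier edx-aurora-cluster --logs_to_publish slowquery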
diff --git a/util/rabbitmq/shovel.py b/util/rabbitmq/shovel.py
new file mode 100644
index 00000000000..0d1538dfd7a
--- /dev/null
+++ b/util/rabbitmq/shovel.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+import argparse
+import subprocess
+import requests
+from requests.exceptions import HTTPError
+import sys
+import six
+
+parser=argparse.ArgumentParser(description='Shovels between RabbitMQ Clusters')
+parser.add_argument('--src_host',action='http://github.com/store',dest='src_host')
+parser.add_argument('--dest_host',action='http://github.com/store',dest='dest_host',default='127.0.0.1')
+parser.add_argument('--src_user',action='http://github.com/store',dest='src_user')
+parser.add_argument('--src_user_pass',action='http://github.com/store',dest='src_user_pass')
+parser.add_argument('--dest_user',action='http://github.com/store',dest='dest_user')
+parser.add_argument('--dest_user_pass',action='http://github.com/store',dest='dest_user_pass')
+
+args=parser.parse_args()
+
+src_uri=f'amqp://{args.src_user}:{args.src_user_pass}@{args.src_host}'
+dest_uri=f'amqp://{args.dest_user}:{args.dest_user_pass}@{args.dest_host}'
+port=15672
+
+def list_vhosts():
+    url=f'http://{args.src_host}:{port}/api/vhosts'
+    try:
+        response=requests.get(url,auth=(args.src_user,args.src_user_pass))
+        response.raise_for_status()
+        vhosts=[v['name'] for v in response.json() if v['name'].startswith('/')]
+    except Exception as ex:
+        print(f"Failed to get vhosts: {ex}")
+        sys.exit(1)
+    return vhosts
+
+def list_queues():
+    # accumulate the queues of every vhost before returning
+    queues=[]
+    for vhost in list_vhosts():
+        url=f'http://{args.src_host}:{port}/api/queues/{vhost}'
+        try:
+            response=requests.get(url,auth=(args.src_user,args.src_user_pass))
+            response.raise_for_status()
+            queues.extend(q['name'] for q in response.json())
+        except Exception as ex:
+            print(f"Failed to get queues: {ex}")
+            sys.exit(1)
+    return queues
+
+def create_shovel(shovel,arg):
+    cmd=f"/usr/sbin/rabbitmqctl set_parameter shovel {shovel} '{arg}'"
+    try:
+        subprocess.check_output(
+            cmd,stderr=subprocess.STDOUT,shell=True)
+    except subprocess.CalledProcessError as ex:
+        return ex.output
+
+if __name__=='__main__':
+
+    """
+    command line arguments are expected to be in the following format
+    python shovel.py --src_host <src host> --src_user <src user> --src_user_pass <src password> \
+        --dest_host <dest host> --dest_user <dest user> --dest_user_pass <dest password>
+    """
+    output={}
+    for queue in list_queues():
+        """
+        Skip the celeryev and *.pidbox queues when shovelling
+        """
+        q=queue.split('.')
+        if (q[0]!='celeryev' and q[-1]!='pidbox'):
+            shovel_definition=f'{{"src-uri": "{src_uri}", "src-queue": "{queue}","dest-uri": "{dest_uri}","dest-queue": "{queue}"}}'
+            print(f"Running shovel for queue:{queue}")
+            shovel_output=create_shovel(queue,shovel_definition)
+            if shovel_output is not None:
+                content=str(shovel_output,"utf-8")
+                output[queue]=content
+    for k,v in output.items():
+        print(k,v)
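A usage sketch with placeholder hosts and credentials; since create_shovel shells out to rabbitmqctl, this is meant to run on the destination broker:

    python shovel.py --src_host old-rabbit.internal --src_user admin --src_user_pass 'secret' \
        --dest_host 127.0.0.1 --dest_user admin --dest_user_pass 'secret'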
diff --git a/util/rds_sgs/rds_sgs.py b/util/rds_sgs/rds_sgs.py
new file mode 100755
index 00000000000..0ae32d944fb
--- /dev/null
+++ b/util/rds_sgs/rds_sgs.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python3
+
+import boto3
+import click
+
+@click.command()
+@click.argument('mode', type=click.Choice(['by_db', 'by_sg']))
+def command(mode):
+    """
+    MODES:
+
+    by_db: Lists rules for all RDS instances and which security group(s) they come from
+
+    by_sg: Shows each security group and which RDS instances are using it
+    """
+    client = boto3.client('rds')
+    ec2_client = boto3.client('ec2')
+    dbs = client.describe_db_instances()
+    dbs_by_sg = {}
+    for db in dbs['DBInstances']:
+        open_ports = {}
+        sg_ids = [sg['VpcSecurityGroupId'] for sg in db['VpcSecurityGroups']]
+        for sg_id in sg_ids:
+            sg = ec2_client.describe_security_groups(GroupIds=[sg_id])['SecurityGroups'][0]
+            sg_id_and_name = "{} ({})".format(sg_id, sg['GroupName'])
+            if sg_id_and_name in dbs_by_sg:
+                dbs_by_sg[sg_id_and_name].append(db['DBInstanceIdentifier'])
+            else:
+                dbs_by_sg[sg_id_and_name] = [db['DBInstanceIdentifier']]
+
+            if mode == 'by_db':
+                for permission in sg['IpPermissions']:
+                    if permission['FromPort'] == permission['ToPort']:
+                        ports = permission['FromPort']
+                    else:
+                        ports = "{}-{}".format(permission['FromPort'],permission['ToPort'])
+                    for IpRange in permission['IpRanges']:
+                        key = IpRange['CidrIp']
+                        desc = sg['GroupName']
+                        if 'Description' in IpRange:
+                            desc = "{}|{}".format(desc, IpRange['Description'])
+
+                        if ports in open_ports:
+                            if key in open_ports[ports]:
+                                open_ports[ports][key][sg_id] = desc
+                            else:
+                                open_ports[ports][key] = {sg_id: desc}
+                        else:
+                            open_ports[ports] = {key: {sg_id: desc}}
+                    for UserIdGroupPair in permission['UserIdGroupPairs']:
+                        source_sg_id = UserIdGroupPair['GroupId']
+                        key = "{} ({})".format(source_sg_id, ec2_client.describe_security_groups(GroupIds=[source_sg_id])['SecurityGroups'][0]['GroupName'])
+
+                        desc = sg['GroupName']
+                        if 'Description' in UserIdGroupPair:
+                            desc = "{}|{}".format(desc, UserIdGroupPair['Description'])
+
+                        if ports in open_ports:
+                            if key in open_ports[ports]:
+                                open_ports[ports][key][sg_id] = desc
+                            else:
+                                open_ports[ports][key] = {sg_id: desc}
+                        else:
+                            open_ports[ports] = {key: {sg_id: desc}}
+
+        for ports,sources in open_ports.items():
+            for source in sorted(sources.keys()):
+                sgs = []
+                for sg_id in sorted(sources[source].keys()):
+                    output = sg_id
+                    if sources[source][sg_id]:
+                        output = f"{output} ({sources[source][sg_id]})"
+                    sgs.append(output)
+                print("{: <40} {: <11} {: <70} {}".format(db['DBInstanceIdentifier'], ports, source, ", ".join(sgs)))
+    if mode == 'by_sg':
+        for sg,dbs in dbs_by_sg.items():
+            print("{: <70} {: <4} {}".format(sg, len(dbs), ", ".join(dbs)))
+
+if __name__ == '__main__':
+    command()
diff --git a/util/rds_sgs/requirements.txt b/util/rds_sgs/requirements.txt
new file mode 100644
index 00000000000..3debd504d24
--- /dev/null
+++ b/util/rds_sgs/requirements.txt
@@ -0,0 +1,2 @@
+boto3==1.9.2
+click==6.7
diff --git a/util/s3_acl.py b/util/s3_acl.py
new file mode 100644
index 00000000000..581338515e8
--- /dev/null
+++ b/util/s3_acl.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python3
+"""
+Get the current ACL of all objects in a given S3 bucket, set them to private, or revert them.
+The script supports three operations:
+1- getacl
+2- setaclprivate
+3- revertacl
+
+One optional parameter:
+exclude (provide it multiple times to filter out several prefixes)
+
+It saves the current ACLs in a file named bucketname.txt for updating or reverting purposes.
+
+python s3_acl.py --bucketname --operation getacl --exclude
+
+Should assume role to run this script.
+""" + + +import boto3 +from botocore.exceptions import ClientError +import backoff +import sys +import json +import click +import logging + +MAX_TRIES = 5 +region = "us-east-1" +# Set logging configuration +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +# create file handler that logs messages +filehandler = logging.FileHandler('s3_acl.log') +filehandler.setLevel(logging.INFO) +formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') +filehandler.setFormatter(formatter) +# add the handlers to logger +logger.addHandler(filehandler) + + +class S3BotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("s3", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def get_object(self, *args, **kwargs): + return self.client.list_objects_v2(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def get_acl(self, *args, **kwargs): + return self.client.get_object_acl(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def put_acl(self, *args, **kwargs): + return self.client.put_object_acl(*args, **kwargs) + + +def get_all_s3_keys(s3_bucket, region, exclude): + """Get a list of all keys in an S3 bucket.""" + keys = [] + kwargs = {'Bucket': s3_bucket} + while True: + s3_client = S3BotoWrapper(region_name=region) + resp = s3_client.get_object(**kwargs) + for obj in resp['Contents']: + # Filter out directories, you can add more filters here if required. + if obj['Key'][-1] == '/' or any(obj['Key'].startswith(filter_object) for filter_object in exclude): + continue + else: + keys.append(obj['Key']) + try: + kwargs['ContinuationToken'] = resp['NextContinuationToken'] + except KeyError: + break + return keys + + +def set_acl_private(acl_list, bucket_name, exclude): + s3_client = S3BotoWrapper(region_name=region) + for item in acl_list: + for key, value in item.items(): + if any(key.startswith(filter_object) for filter_object in exclude): + continue + else: + try: + s3_client.put_acl( + ACL='private', + Bucket=bucket_name, + Key=key, + ) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchKey': + logger.warning("No such key in S3: " + key) # Will send the errors to the file + else: + logger.error(f"Unexpected error :{e}") + sys.exit(1) + + +def revert_s3_acl(acl_list, bucket_name, exclude): + s3_client = S3BotoWrapper(region_name=region) + for item in acl_list: + for key, value in item.items(): + if any(key.startswith(filter_object) for filter_object in exclude): + continue + else: + try: + value.pop('ResponseMetadata', None) + s3_client.put_acl( + AccessControlPolicy=value, + Bucket=bucket_name, + Key=key, + ) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchKey': + logger.warning("No such key in S3: " + key) # Will send the errors to the file + else: + logger.error(f"Unexpected error :{e}") + sys.exit(1) + + +def get_s3_acl(s3_bucket, exclude): + s3_client = S3BotoWrapper(region_name=region) + response_list = [] + try: + s3_objects_key = get_all_s3_keys(s3_bucket, region, exclude) + except ClientError as e: + logger.error(f"Unable to connect to AWS with error :{e}") + sys.exit(1) + for object_key in s3_objects_key: + try: + temp = {} + response = s3_client.get_acl(Bucket=s3_bucket, Key=object_key) + temp[object_key] = response + response_list.append(temp) + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + logger.warning("You Don't have permission 
+
+
+def get_s3_acl(s3_bucket, exclude):
+    s3_client = S3BotoWrapper(region_name=region)
+    response_list = []
+    try:
+        s3_objects_key = get_all_s3_keys(s3_bucket, region, exclude)
+    except ClientError as e:
+        logger.error(f"Unable to connect to AWS with error: {e}")
+        sys.exit(1)
+    for object_key in s3_objects_key:
+        try:
+            temp = {}
+            response = s3_client.get_acl(Bucket=s3_bucket, Key=object_key)
+            temp[object_key] = response
+            response_list.append(temp)
+        except ClientError as e:
+            if e.response['Error']['Code'] == 'AccessDenied':
+                logger.warning("You don't have permission to access this object: " + object_key)
+            elif e.response['Error']['Code'] == 'NoSuchKey':
+                logger.warning("No such key in S3: " + object_key)  # Will send the errors to the file
+            else:
+                logger.error(f"Unexpected error: {e}")
+                sys.exit(1)
+    return response_list
+
+
+@click.command()
+@click.option('--bucketname', required=True, help='S3 bucket name')
+@click.option('--operation', required=True, help='Operation name to perform, i.e. 1- getacl 2- setaclprivate 3- revertacl')
+@click.option('--exclude', '-i', multiple=True, help='S3 object prefixes to skip')
+def controller(bucketname, operation, exclude):
+    file_to_write = bucketname + ".txt"
+    if operation == 'getacl':
+        objects_acl = get_s3_acl(bucketname, exclude)
+        with open(file_to_write, 'w') as fout:
+            json.dump(objects_acl, fout)
+        logger.info("Task completed. Total number of objects read: " + str(len(objects_acl)))
+    elif operation == 'setaclprivate':
+        try:
+            data = []
+            with open(file_to_write) as inFile:
+                data = json.load(inFile)
+            set_acl_private(data, bucketname, exclude)
+            logger.info("Task completed. ACL of " + bucketname + " objects set to private.")
+        except OSError:
+            logger.error("File not accessible")
+            sys.exit(1)
+    elif operation == 'revertacl':
+        try:
+            data = []
+            with open(file_to_write) as inFile:
+                data = json.load(inFile)
+            revert_s3_acl(data, bucketname, exclude)
+            logger.info("Task completed. ACL of " + bucketname + " objects reverted to the saved state.")
+        except OSError:
+            logger.error("File not accessible")
+            sys.exit(1)
+    else:
+        logger.error("Invalid operation. Supported operations are: getacl, setaclprivate, revertacl")  # Will send the errors to the file
+        sys.exit(0)
+
+
+if __name__ == '__main__':
+    controller()
+
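A usage sketch; the bucket name and prefix are hypothetical, and the script expects an assumed role with access to the bucket:

    # Snapshot ACLs (written to my-bucket.txt), skipping anything under logs/
    python s3_acl.py --bucketname my-bucket --operation getacl --exclude logs/
    # Set everything private, then later restore the snapshot
    python s3_acl.py --bucketname my-bucket --operation setaclprivate
    python s3_acl.py --bucketname my-bucket --operation revertacl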
diff --git a/util/s3_obj_acl.py b/util/s3_obj_acl.py
new file mode 100644
index 00000000000..9b08111b882
--- /dev/null
+++ b/util/s3_obj_acl.py
@@ -0,0 +1,101 @@
+import csv
+import sys
+import urllib.parse
+import logging
+from botocore.exceptions import ClientError
+import backoff
+import click
+import boto3
+import concurrent.futures
+
+
+MAX_TRIES = 5
+inconsistent_acl_objects = []
+consistent_acl_objects = []
+
+
+# Set logging configuration
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+# create file handler that logs messages
+filehandler = logging.FileHandler('result.txt')
+filehandler.setLevel(logging.INFO)
+formatter = logging.Formatter(
+    '%(message)s')
+filehandler.setFormatter(formatter)
+# add the handlers to logger
+logger.addHandler(filehandler)
+
+
+class S3BotoWrapper:
+    def __init__(self):
+        self.client = boto3.client('s3')
+
+    @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES)
+    def get_object_acl(self, bucket_name, obj_key):
+        return self.client.get_object_acl(Bucket=bucket_name, Key=obj_key)
+
+
+def get_object_acl(bucket_name, object_name):
+    client = S3BotoWrapper()
+    # Decode URL-encoded keys before querying S3
+    object_name_decoded = urllib.parse.unquote(object_name)
+    try:
+        # Get the ACL for the object
+        response = client.get_object_acl(bucket_name, object_name_decoded)
+        acl = response['Grants']
+        return acl
+    except Exception as e:
+        print(f"Error retrieving ACL for {object_name_decoded}: {str(e)}")
+        return None
+
+
+def check_acl_uniformity(object_acls):
+    # Extract the first object's ACL as the baseline
+    baseline_acl = list(object_acls.values())[0]
+    for key, acl in object_acls.items():
+        if acl != baseline_acl:
+            acl_dict = {key: acl}
+            inconsistent_acl_objects.append(acl_dict)
+        else:
+            acl_cons_dict = {key: acl}
+            consistent_acl_objects.append(acl_cons_dict)
+
+    return True
+
+
+def read_csv_file(file_path):
+    object_acls = {}
+    with open(file_path, 'r') as file:
+        reader = csv.reader(file)
+        objects = list(reader)
+        with concurrent.futures.ProcessPoolExecutor() as executor:
+            futures = {executor.submit(get_object_acl, obj[0], obj[1]): obj for obj in objects}
+
+            for future in concurrent.futures.as_completed(futures):
+                obj = futures[future]
+                acl = future.result()
+                object_acls[(obj[0], obj[1])] = acl
+
+    return object_acls
+
+
+@click.command()
+@click.option('--file_name', required=True, help='Use to identify the file name')
+def controller(file_name):
+    obj_dict = read_csv_file(file_name)
+    is_acl_uniform = check_acl_uniformity(obj_dict)
+    logger.info("Objects with same acl")
+    for obj in consistent_acl_objects:
+        logger.info(obj)
+    logger.info("\n\nObjects with different acl")
+    for in_obj in inconsistent_acl_objects:
+        logger.info(in_obj)
+
+
+if __name__ == '__main__':
+    controller()
diff --git a/util/sync_hooks.sh b/util/sync_hooks.sh
deleted file mode 100755
index 13e16c8a420..00000000000
--- a/util/sync_hooks.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-# A small utility to symlink the files from git-hooks/ with filenames ending
-# like .in into the directory .git/hooks/
-#
-# It's intended this be run once near the start of a project by hand, and then
-# subsequently a hook that it installs keeps it running at project checkouts.
-
-
-# Save current directory so we can come back; change to repo root
-STARTED_FROM=`pwd`
-cd $(git rev-parse --show-toplevel)
-
-# Sync git-hooks directory entries into .git/hooks/
-for file in git-hooks/*.in; do
-  filepart=`basename $file .in`
-  if [ -e .git/hooks/$filepart -a ! -L .git/hooks/$filepart ]; then
-    echo ".git/hooks/$filepart not link-managed; bailing..."
-    echo "please examine your .git/hooks/ directory and repair inconsistencies manually"
-    cd $STARTED_FROM
-    exit 1
-  else
-    ln -v -s -f `pwd`/$file .git/hooks/$filepart
-  fi
-done
-
-# Ok, everything went well; restore previous context
-cd $STARTED_FROM
-exit 0
diff --git a/util/tableau/installer.sh b/util/tableau/installer.sh
new file mode 100644
index 00000000000..af616c0db09
--- /dev/null
+++ b/util/tableau/installer.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+# Tableau server installer.
+# Usage:
+## installer.sh ADMIN_USER ADMIN_PASS TABLEAU_ADMIN_USER TABLEAU_ADMIN_PASS
+##
+## Example:
+## ./installer.sh ubuntu-user ubuntu-pass test-user test-pass
+##
+
+ADMIN_USER=$1
+ADMIN_PASS=$2
+TABLEAU_ADMIN_USER=$3
+TABLEAU_ADMIN_PASS=$4
+
+mkdir tableau
+cd tableau/
+git clone https://github.com/tableau/server-install-script-samples.git
+cd server-install-script-samples/linux/automated-installer/
+wget https://downloads.tableau.com/tssoftware/tableau-server-2020-4-1_amd64.deb
+
+cat > secrets <<- EOM
+# You can use this as a template for the secrets file used with the
+# automated-installer script.
+#
+# Note: If you do not enter the tsm_admin_pass or the
+# tableau_server_admin_pass in this file, you will be prompted to enter this
+# information during installation. However, you must enter the account names
+# for tsm_admin_user and tableau_server_admin_user.
+
+# Credentials for the account that is running the automated-installer script.
+# This account will be added to the 'tsmadmin' group. The 'tsmadmin' group is
+# created during the Tableau installation process. Members of the 'tsmadmin'
Members of the 'tsmadmin' +# group can run TSM commands. +# +tsm_admin_user="$TABLEAU_ADMIN_USER" +tsm_admin_pass="$TABLEAU_ADMIN_PASS" + +# Enter a username and password to create the initial Tableau administrator +# account. This account will be created in Tableau Server by the installation +# process and will have Tableau Server administrator rights. The user account +# will be local to Tableau Server and will not be a Linux OS account. If you +# are using LDAP or AD for authentication, then the account you specify for +# the Tableau administrator must be a valid account from the directory service. +# +tableau_server_admin_user="$ADMIN_USER" +tableau_server_admin_pass="$ADMIN_PASS" +EOM + +sudo ./automated-installer -s secrets -f config.json -r registration.json --accepteula tableau-server-2020-4-1_amd64.deb diff --git a/util/vpc-tools/abbey.py b/util/vpc-tools/abbey.py deleted file mode 100644 index c7cd9a44e6e..00000000000 --- a/util/vpc-tools/abbey.py +++ /dev/null @@ -1,711 +0,0 @@ -#!/usr/bin/env python -u -import sys -from argparse import ArgumentParser -import time -import json -import yaml -import os -try: - import boto.ec2 - import boto.sqs - from boto.vpc import VPCConnection - from boto.exception import NoAuthHandlerFound, EC2ResponseError - from boto.sqs.message import RawMessage -except ImportError: - print "boto required for script" - sys.exit(1) - -from pymongo import MongoClient -from pymongo.errors import ConnectionFailure, DuplicateKeyError -from pprint import pprint - -AMI_TIMEOUT = 600 # time to wait for AMIs to complete -EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition -EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks -NUM_TASKS = 5 # number of tasks for time summary report -NUM_PLAYBOOKS = 2 - - -class MongoConnection: - - def __init__(self): - try: - mongo = MongoClient(host=args.mongo_uri) - except ConnectionFailure: - print "Unable to connect to the mongo database specified" - sys.exit(1) - - mongo_db = getattr(mongo, args.mongo_db) - if args.mongo_ami_collection not in mongo_db.collection_names(): - mongo_db.create_collection(args.mongo_ami_collection) - if args.mongo_deployment_collection not in mongo_db.collection_names(): - mongo_db.create_collection(args.mongo_deployment_collection) - self.mongo_ami = getattr(mongo_db, args.mongo_ami_collection) - self.mongo_deployment = getattr( - mongo_db, args.mongo_deployment_collection) - - def update_ami(self, ami): - """ - Creates a new document in the AMI - collection with the ami id as the - id - """ - - query = { - '_id': ami, - 'play': args.play, - 'env': args.environment, - 'deployment': args.deployment, - 'configuration_ref': args.configuration_version, - 'configuration_secure_ref': args.configuration_secure_version, - 'vars': git_refs, - } - try: - self.mongo_ami.insert(query) - except DuplicateKeyError: - if not args.noop: - print "Entry already exists for {}".format(ami) - raise - - def update_deployment(self, ami): - """ - Adds the built AMI to the deployment - collection - """ - query = {'_id': args.jenkins_build} - deployment = self.mongo_deployment.find_one(query) - try: - deployment['plays'][args.play]['amis'][args.environment] = ami - except KeyError: - msg = "Unexpected document structure, couldn't write " +\ - "to path deployment['plays']['{}']['amis']['{}']" - print msg.format(args.play, args.environment) - pprint(deployment) - if args.noop: - deployment = { - 'plays': { - args.play: { - 'amis': { - args.environment: ami, - }, - }, - }, - } - else: - raise - - 
self.mongo_deployment.save(deployment) - - -class Unbuffered: - """ - For unbuffered output, not - needed if PYTHONUNBUFFERED is set - """ - def __init__(self, stream): - self.stream = stream - - def write(self, data): - self.stream.write(data) - self.stream.flush() - - def __getattr__(self, attr): - return getattr(self.stream, attr) - -sys.stdout = Unbuffered(sys.stdout) - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('--noop', action='http://github.com/store_true', - help="don't actually run the cmds", - default=False) - parser.add_argument('--secure-vars', required=False, - metavar="SECURE_VAR_FILE", - help="path to secure-vars from the root of " - "the secure repo (defaults to ansible/" - "vars/DEPLOYMENT/ENVIRONMENT-DEPLOYMENT.yml)") - parser.add_argument('--stack-name', - help="defaults to ENVIRONMENT-DEPLOYMENT", - metavar="STACK_NAME", - required=False) - parser.add_argument('-p', '--play', - help='play name without the yml extension', - metavar="PLAY", required=True) - parser.add_argument('-d', '--deployment', metavar="DEPLOYMENT", - required=True) - parser.add_argument('-e', '--environment', metavar="ENVIRONMENT", - required=True) - parser.add_argument('-v', '--verbose', action='http://github.com/store_true', - help="turn on verbosity") - parser.add_argument('--no-cleanup', action='http://github.com/store_true', - help="don't cleanup on failures") - parser.add_argument('--vars', metavar="EXTRA_VAR_FILE", - help="path to extra var file", required=False) - parser.add_argument('--refs', metavar="GIT_REFS_FILE", - help="path to a var file with app git refs", required=False) - parser.add_argument('-a', '--application', required=False, - help="Application for subnet, defaults to admin", - default="admin") - parser.add_argument('--configuration-version', required=False, - help="configuration repo branch(no hashes)", - default="master") - parser.add_argument('--configuration-secure-version', required=False, - help="configuration-secure repo branch(no hashes)", - default="master") - parser.add_argument('--configuration-secure-repo', required=False, - default="git@github.com:edx-ops/prod-secure", - help="repo to use for the secure files") - parser.add_argument('-j', '--jenkins-build', required=False, - help="jenkins build number to update") - parser.add_argument('-b', '--base-ami', required=False, - help="ami to use as a base ami", - default="ami-0568456c") - parser.add_argument('-i', '--identity', required=False, - help="path to identity file for pulling " - "down configuration-secure", - default=None) - parser.add_argument('-r', '--region', required=False, - default="us-east-1", - help="aws region") - parser.add_argument('-k', '--keypair', required=False, - default="deployment", - help="AWS keypair to use for instance") - parser.add_argument('-t', '--instance-type', required=False, - default="m1.large", - help="instance type to launch") - parser.add_argument("--role-name", required=False, - default="abbey", - help="IAM role name to use (must exist)") - parser.add_argument("--msg-delay", required=False, - default=5, - help="How long to delay message display from sqs " - "to ensure ordering") - parser.add_argument("--mongo-uri", required=False, - default=None, - help="Mongo uri for the host that contains" - "the AMI collection") - parser.add_argument("--mongo-db", required=False, - default="test", - help="Mongo database") - parser.add_argument("--mongo-ami-collection", required=False, - default="amis", - help="Mongo ami collection") - 
parser.add_argument("--mongo-deployment-collection", required=False, - default="deployment", - help="Mongo deployment collection") - - return parser.parse_args() - - -def get_instance_sec_group(vpc_id): - - security_group_id = None - - grp_details = ec2.get_all_security_groups( - filters={ - 'vpc_id':vpc_id, - 'tag:play': args.play - } - ) - - if len(grp_details) < 1: - sys.stderr.write("ERROR: Expected atleast one security group, got {}\n".format( - len(grp_details))) - - return grp_details[0].id - - -def create_instance_args(): - """ - Looks up security group, subnet - and returns arguments to pass into - ec2.run_instances() including - user data - """ - - vpc = VPCConnection() - subnet = vpc.get_all_subnets( - filters={ - 'tag:aws:cloudformation:stack-name': stack_name, - 'tag:play': args.play} - ) - if len(subnet) < 1: - sys.stderr.write("ERROR: Expected at least one subnet, got {}\n".format( - len(subnet))) - sys.exit(1) - subnet_id = subnet[0].id - vpc_id = subnet[0].vpc_id - - security_group_id = get_instance_sec_group(vpc_id) - - if args.identity: - config_secure = 'true' - with open(args.identity) as f: - identity_file = f.read() - else: - config_secure = 'false' - identity_file = "dummy" - - user_data = """#!/bin/bash -set -x -set -e -exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 -base_dir="/var/tmp/edx-cfg" -extra_vars="$base_dir/extra-vars-$$.yml" -secure_identity="$base_dir/secure-identity" -git_ssh="$base_dir/git_ssh.sh" -configuration_version="{configuration_version}" -configuration_secure_version="{configuration_secure_version}" -environment="{environment}" -deployment="{deployment}" -play="{play}" -config_secure={config_secure} -git_repo_name="configuration" -git_repo="https://github.com/edx/$git_repo_name" -git_repo_secure="{configuration_secure_repo}" -git_repo_secure_name="{configuration_secure_repo_basename}" -secure_vars_file="$base_dir/$git_repo_secure_name/{secure_vars}" -instance_id=\\ -$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null) -instance_ip=\\ -$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null) -instance_type=\\ -$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null) -playbook_dir="$base_dir/configuration/playbooks/edx-east" - -if $config_secure; then - git_cmd="env GIT_SSH=$git_ssh git" -else - git_cmd="git" -fi - -ANSIBLE_ENABLE_SQS=true -SQS_NAME={queue_name} -SQS_REGION=us-east-1 -SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]" -PYTHONUNBUFFERED=1 - -# environment for ansible -export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED - -if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then - echo "Installing pkg dependencies" - /usr/bin/apt-get update - /usr/bin/apt-get install -y git python-pip python-apt \\ - git-core build-essential python-dev libxml2-dev \\ - libxslt-dev curl --force-yes -fi - - -rm -rf $base_dir -mkdir -p $base_dir -cd $base_dir - -cat << EOF > $git_ssh -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@" -EOF - -chmod 755 $git_ssh - -if $config_secure; then - cat << EOF > $secure_identity -{identity_file} -EOF -fi - -cat << EOF >> $extra_vars ---- -# extra vars passed into -# abbey.py including versions -# of all the repositories -{extra_vars_yml} - -{git_refs_yml} - -# The private key used for pulling down -# private edx-platform repos is the same -# identity of the github huser that has -# access to the secure vars repo. 
-# EDXAPP_USE_GIT_IDENTITY needs to be set -# to true in the extra vars for this -# variable to be used. -EDXAPP_LOCAL_GIT_IDENTITY: $secure_identity - -# abbey will always run fake migrations -# this is so that the application can come -# up healthy -fake_migrations: true -EOF - -chmod 400 $secure_identity - -$git_cmd clone $git_repo $git_repo_name -cd $git_repo_name -$git_cmd checkout $configuration_version -cd $base_dir - -if $config_secure; then - $git_cmd clone $git_repo_secure $git_repo_secure_name - cd $git_repo_secure_name - $git_cmd checkout $configuration_secure_version - cd $base_dir -fi - -cd $base_dir/$git_repo_name -sudo pip install -r requirements.txt - -cd $playbook_dir - -ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$secure_vars_file -e@$extra_vars -ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$secure_vars_file -e@$extra_vars - -rm -rf $base_dir - - """.format( - configuration_version=args.configuration_version, - configuration_secure_version=args.configuration_secure_version, - configuration_secure_repo=args.configuration_secure_repo, - configuration_secure_repo_basename=os.path.basename( - args.configuration_secure_repo), - environment=args.environment, - deployment=args.deployment, - play=args.play, - config_secure=config_secure, - identity_file=identity_file, - queue_name=run_id, - extra_vars_yml=extra_vars_yml, - git_refs_yml=git_refs_yml, - secure_vars=secure_vars) - - ec2_args = { - 'security_group_ids': [security_group_id], - 'subnet_id': subnet_id, - 'key_name': args.keypair, - 'image_id': args.base_ami, - 'instance_type': args.instance_type, - 'instance_profile_name': args.role_name, - 'user_data': user_data, - - } - - return ec2_args - - -def poll_sqs_ansible(): - """ - Prints events to the console and - blocks until a final STATS ansible - event is read off of SQS. - - SQS does not guarantee FIFO, for that - reason there is a buffer that will delay - messages before they are printed to the - console. - - Returns length of the ansible run. - """ - oldest_msg_ts = 0 - buf = [] - task_report = [] # list of tasks for reporting - last_task = None - completed = 0 - while True: - messages = [] - while True: - # get all available messages on the queue - msgs = sqs_queue.get_messages(attributes='All') - if not msgs: - break - messages.extend(msgs) - - for message in messages: - recv_ts = float( - message.attributes['ApproximateFirstReceiveTimestamp']) * .001 - sent_ts = float(message.attributes['SentTimestamp']) * .001 - try: - msg_info = { - 'msg': json.loads(message.get_body()), - 'sent_ts': sent_ts, - 'recv_ts': recv_ts, - } - buf.append(msg_info) - except ValueError as e: - print "!!! ERROR !!! 
unable to parse queue message, " \ - "expecting valid json: {} : {}".format( - message.get_body(), e) - if not oldest_msg_ts or recv_ts < oldest_msg_ts: - oldest_msg_ts = recv_ts - sqs_queue.delete_message(message) - - now = int(time.time()) - if buf: - try: - if (now - max([msg['recv_ts'] for msg in buf])) > args.msg_delay: - # sort by TS instead of recv_ts - # because the sqs timestamp is not as - # accurate - buf.sort(key=lambda k: k['msg']['TS']) - to_disp = buf.pop(0) - if 'START' in to_disp['msg']: - print '\n{:0>2.0f}:{:0>5.2f} {} : Starting "{}"'.format( - to_disp['msg']['TS'] / 60, - to_disp['msg']['TS'] % 60, - to_disp['msg']['PREFIX'], - to_disp['msg']['START']), - - elif 'TASK' in to_disp['msg']: - print "\n{:0>2.0f}:{:0>5.2f} {} : {}".format( - to_disp['msg']['TS'] / 60, - to_disp['msg']['TS'] % 60, - to_disp['msg']['PREFIX'], - to_disp['msg']['TASK']), - last_task = to_disp['msg']['TASK'] - elif 'OK' in to_disp['msg']: - if args.verbose: - print "\n" - for key, value in to_disp['msg']['OK'].iteritems(): - print " {:<15}{}".format(key, value) - else: - invocation = to_disp['msg']['OK']['invocation'] - module = invocation['module_name'] - # 'set_fact' does not provide a changed value. - if module == 'set_fact': - changed = "OK" - elif to_disp['msg']['OK']['changed']: - changed = "*OK*" - else: - changed = "OK" - print " {}".format(changed), - task_report.append({ - 'TASK': last_task, - 'INVOCATION': to_disp['msg']['OK']['invocation'], - 'DELTA': to_disp['msg']['delta'], - }) - elif 'FAILURE' in to_disp['msg']: - print " !!!! FAILURE !!!!", - for key, value in to_disp['msg']['FAILURE'].iteritems(): - print " {:<15}{}".format(key, value) - raise Exception("Failed Ansible run") - elif 'STATS' in to_disp['msg']: - print "\n{:0>2.0f}:{:0>5.2f} {} : COMPLETE".format( - to_disp['msg']['TS'] / 60, - to_disp['msg']['TS'] % 60, - to_disp['msg']['PREFIX']) - - # Since 3 ansible plays get run. - # We see the COMPLETE message 3 times - # wait till the last one to end listening - # for new messages. - completed += 1 - if completed >= NUM_PLAYBOOKS: - return (to_disp['msg']['TS'], task_report) - except KeyError: - print "Failed to print status from message: {}".format(to_disp) - - if not messages: - # wait 1 second between sqs polls - time.sleep(1) - - -def create_ami(instance_id, name, description): - - params = {'instance_id': instance_id, - 'name': name, - 'description': description, - 'no_reboot': True} - - image_id = ec2.create_image(**params) - - for _ in xrange(AMI_TIMEOUT): - try: - img = ec2.get_image(image_id) - if img.state == 'available': - break - else: - time.sleep(1) - except EC2ResponseError as e: - if e.error_code == 'InvalidAMIID.NotFound': - time.sleep(1) - else: - raise Exception("Unexpected error code: {}".format( - e.error_code)) - time.sleep(1) - else: - raise Exception("Timeout waiting for AMI to finish") - - return image_id - - -def launch_and_configure(ec2_args): - """ - Creates an sqs queue, launches an ec2 instance, - configures it and creates an AMI. 
Polls - SQS for updates - """ - - print "{:<40}".format( - "Creating SQS queue and launching instance for {}:".format(run_id)) - print - for k, v in ec2_args.iteritems(): - if k != 'user_data': - print " {:<25}{}".format(k, v) - print - - global sqs_queue - global instance_id - sqs_queue = sqs.create_queue(run_id) - sqs_queue.set_message_class(RawMessage) - res = ec2.run_instances(**ec2_args) - inst = res.instances[0] - instance_id = inst.id - - print "{:<40}".format( - "Waiting for instance {} to reach running status:".format(instance_id)), - status_start = time.time() - for _ in xrange(EC2_RUN_TIMEOUT): - res = ec2.get_all_instances(instance_ids=[instance_id]) - if res[0].instances[0].state == 'running': - status_delta = time.time() - status_start - run_summary.append(('EC2 Launch', status_delta)) - print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( - status_delta / 60, - status_delta % 60) - break - else: - time.sleep(1) - else: - raise Exception("Timeout waiting for running status: {} ".format( - instance_id)) - - print "{:<40}".format("Waiting for system status:"), - system_start = time.time() - for _ in xrange(EC2_STATUS_TIMEOUT): - status = ec2.get_all_instance_status(inst.id) - if status[0].system_status.status == u'ok': - system_delta = time.time() - system_start - run_summary.append(('EC2 Status Checks', system_delta)) - print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( - system_delta / 60, - system_delta % 60) - break - else: - time.sleep(1) - else: - raise Exception("Timeout waiting for status checks: {} ".format( - instance_id)) - - print - print "{:<40}".format( - "Waiting for user-data, polling sqs for Ansible events:") - - (ansible_delta, task_report) = poll_sqs_ansible() - run_summary.append(('Ansible run', ansible_delta)) - print - print "{} longest Ansible tasks (seconds):".format(NUM_TASKS) - for task in sorted( - task_report, reverse=True, - key=lambda k: k['DELTA'])[:NUM_TASKS]: - print "{:0>3.0f} {}".format(task['DELTA'], task['TASK']) - print " - {}".format(task['INVOCATION']) - print - - print "{:<40}".format("Creating AMI:"), - ami_start = time.time() - ami = create_ami(instance_id, run_id, run_id) - ami_delta = time.time() - ami_start - print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( - ami_delta / 60, - ami_delta % 60) - run_summary.append(('AMI Build', ami_delta)) - total_time = time.time() - start_time - all_stages = sum(run[1] for run in run_summary) - if total_time - all_stages > 0: - run_summary.append(('Other', total_time - all_stages)) - run_summary.append(('Total', total_time)) - - return run_summary, ami - -if __name__ == '__main__': - - args = parse_args() - - run_summary = [] - - start_time = time.time() - - if args.vars: - with open(args.vars) as f: - extra_vars_yml = f.read() - extra_vars = yaml.load(extra_vars_yml) - else: - extra_vars_yml = "" - extra_vars = {} - - if args.refs: - with open(args.refs) as f: - git_refs_yml = f.read() - git_refs = yaml.load(git_refs_yml) - else: - git_refs_yml = "" - git_refs = {} - - if args.secure_vars: - secure_vars = args.secure_vars - else: - secure_vars = "ansible/vars/{}/{}-{}.yml".format( - args.environment, args.environment, args.deployment) - if args.stack_name: - stack_name = args.stack_name - else: - stack_name = "{}-{}".format(args.environment, args.deployment) - - try: - sqs = boto.sqs.connect_to_region(args.region) - ec2 = boto.ec2.connect_to_region(args.region) - except NoAuthHandlerFound: - print 'You must be able to connect to sqs and ec2 to use this script' - sys.exit(1) - - if args.mongo_uri: - mongo_con = 
MongoConnection() - - try: - sqs_queue = None - instance_id = None - - run_id = "{}-abbey-{}-{}-{}".format( - int(time.time() * 100), args.environment, args.deployment, args.play) - - ec2_args = create_instance_args() - - if args.noop: - print "Would have created sqs_queue with id: {}\nec2_args:".format( - run_id) - pprint(ec2_args) - ami = "ami-00000" - else: - run_summary, ami = launch_and_configure(ec2_args) - print - print "Summary:\n" - - for run in run_summary: - print "{:<30} {:0>2.0f}:{:0>5.2f}".format( - run[0], run[1] / 60, run[1] % 60) - print "AMI: {}".format(ami) - if args.mongo_uri: - mongo_con.update_ami(ami) - mongo_con.update_deployment(ami) - finally: - print - if not args.no_cleanup and not args.noop: - if sqs_queue: - print "Cleaning up - Removing SQS queue - {}".format(run_id) - sqs.delete_queue(sqs_queue) - if instance_id: - print "Cleaning up - Terminating instance ID - {}".format( - instance_id) - # Check to make sure we have an instance id. - if instance_id: - ec2.terminate_instances(instance_ids=[instance_id]) diff --git a/util/vpc-tools/asg_lifcycle_watcher.py b/util/vpc-tools/asg_lifcycle_watcher.py new file mode 100644 index 00000000000..dc4e72ab419 --- /dev/null +++ b/util/vpc-tools/asg_lifcycle_watcher.py @@ -0,0 +1,205 @@ +__author__ = 'e0d' + +""" +Retrieves AWS Auto-scaling lifecycle messages from an SQS queue and processes them. For +the LifeCycleTransition type of autoscaling:EC2_INSTANCE_TERMINATING, ec2 instances are inspected +for a safe_to_retire tag. If that tag exists, the termination state transition is continued; if not, the +lifecycle timeout is extended. + +Because the lifecycle commands are not yet available in boto, these commands are, unfortunately, +run via a subprocess call to the awscli. This should be fixed when boto is updated. + +This script is meant to be run periodically via some process automation, say, Jenkins. + +It relies on some component applying the proper tags and performing pre-retirement activities.
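+ +For reference, a terminating-instance message consumed below carries (among other fields) the keys this script reads; a hypothetical, abbreviated payload with made-up values: + +{"LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING", "EC2InstanceId": "i-0123456789abcdef0", "AutoScalingGroupName": "example-asg", "LifecycleActionToken": "example-token"} + +(The SNS envelope wraps this JSON in a 'Message' field, which is why the code parses body['Message'].)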
+ +./asg_lifcycle_watcher.py -q autoscaling-lifecycle-queue -b /home/you/.virtualenvs/aws/bin --hook MyLifeCycleHook +""" + +import argparse +import boto3 +import json +import subprocess +import logging +import os +from distutils import spawn + +class MissingHostError(Exception): + pass + +class LifecycleHandler: + + INSTANCE_TERMINATION = 'autoscaling:EC2_INSTANCE_TERMINATING' + TEST_NOTIFICATION = 'autoscaling:TEST_NOTIFICATION' + NUM_MESSAGES = 10 + WAIT_TIME_SECONDS = 1 + VISIBILITY_TIMEOUT = 10 + + def __init__(self, region, queue, hook, dry_run, bin_directory=None): + logging.basicConfig(level=logging.INFO) + self.queue = queue + self.hook = hook + self.region = region + if bin_directory: + os.environ["PATH"] = bin_directory + os.pathsep + os.environ["PATH"] + self.aws_bin = spawn.find_executable('aws') + self.python_bin = spawn.find_executable('python') + + self.base_cli_command = "{python_bin} {aws_bin} ".format( + python_bin=self.python_bin, + aws_bin=self.aws_bin) + + if self.region: + self.base_cli_command += f"--region {self.region} " + + self.dry_run = dry_run + self.ec2_con = boto3.client('ec2', region_name=self.region) + self.sqs_con = boto3.client('sqs', region_name=self.region) + + def process_lifecycle_messages(self): + queue_url = self.sqs_con.get_queue_url(http://github.com/QueueName=self.queue)['QueueUrl'] + queue = boto3.resource('sqs', region_name=self.region).Queue(queue_url) + + for sqs_message in self.sqs_con.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=LifecycleHandler.NUM_MESSAGES, VisibilityTimeout=LifecycleHandler.VISIBILITY_TIMEOUT, + WaitTimeSeconds=LifecycleHandler.WAIT_TIME_SECONDS).get('Messages', []): + body = json.loads(sqs_message['Body']) + as_message = json.loads(body['Message']) + logging.info(f"Processing message {as_message}.") + + if 'LifecycleTransition' in as_message and as_message['LifecycleTransition'] \ + == LifecycleHandler.INSTANCE_TERMINATION: + # Convenience vars, set here to avoid messages that don't meet the criteria in + # the if condition above.
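+ # The three fields below are exactly what the awscli lifecycle commands + # invoked further down (complete-lifecycle-action and + # record-lifecycle-action-heartbeat) require.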
+ instance_id = as_message['EC2InstanceId'] + asg = as_message['AutoScalingGroupName'] + token = as_message['LifecycleActionToken'] + + try: + + if self.verify_ok_to_retire(instance_id): + + logging.info("Host is marked as OK to retire, retiring {instance}".format( + instance=instance_id)) + + self.continue_lifecycle(asg, token, self.hook) + + self.delete_sqs_message(queue, sqs_message, as_message, self.dry_run) + + else: + logging.info("Recording lifecycle heartbeat for instance {instance}".format( + instance=instance_id)) + + self.record_lifecycle_action_heartbeat(asg, token, self.hook) + except MissingHostError as mhe: + logging.exception(mhe) + # There is nothing we can do to recover from this, so we + # still delete the message + self.delete_sqs_message(queue, sqs_message, as_message, self.dry_run) + + # These notifications are sent when configuring a new lifecycle hook; they can be + # deleted safely + elif as_message['Event'] == LifecycleHandler.TEST_NOTIFICATION: + self.delete_sqs_message(queue, sqs_message, as_message, self.dry_run) + else: + raise NotImplementedError("Encountered message, {message_id}, of unexpected type.".format( + message_id=as_message['MessageId'])) + + def delete_sqs_message(self, queue, sqs_message, as_message, dry_run): + if not dry_run: + logging.info(f"Deleting message with body {as_message}") + self.sqs_con.delete_message(QueueUrl=queue.url, ReceiptHandle=sqs_message['ReceiptHandle']) + else: + logging.info(f"Would have deleted message with body {as_message}") + + def record_lifecycle_action_heartbeat(self, asg, token, hook): + + command = self.base_cli_command + "autoscaling record-lifecycle-action-heartbeat " \ + "--lifecycle-hook-name {hook} " \ + "--auto-scaling-group-name {asg} " \ + "--lifecycle-action-token {token}".format( + hook=hook, asg=asg, token=token) + + self.run_subprocess_command(command, self.dry_run) + + def continue_lifecycle(self, asg, token, hook): + command = self.base_cli_command + "autoscaling complete-lifecycle-action --lifecycle-hook-name {hook} " \ + "--auto-scaling-group-name {asg} --lifecycle-action-token {token} --lifecycle-action-result " \ + "CONTINUE".format( + hook=hook, asg=asg, token=token) + + self.run_subprocess_command(command, self.dry_run) + + def run_subprocess_command(self, command, dry_run): + + message = f"Running command {command}."
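+ # In dry-run mode the command is only logged; otherwise it is executed + # and any failure is logged and re-raised.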
+ + if not dry_run: + logging.info(message) + try: + output = subprocess.check_output(command.split(' ')) + logging.info(f"Output was {output}") + except Exception as e: + logging.exception(e) + raise e + else: + logging.info(f"Dry run: {message}") + + def get_ec2_instance_by_id(self, instance_id): + """ + Simple boto call to get the instance based on the instance-id + """ + reservations = self.ec2_con.describe_instances(InstanceIds=[instance_id]).get('Reservations', []) + instances = [] + if len(reservations) == 1: + instances = reservations[0].get('Instances', []) + if len(instances) == 1: + return instances[0] + else: + return None + + def verify_ok_to_retire(self, instance_id): + """ + Ensure that the safe_to_retire tag has been added to the instance in question + with the value 'true' + """ + instance = self.get_ec2_instance_by_id(instance_id) + tags_dict = {} + + if instance: + tags_dict = {} + for t in instance['Tags']: + tags_dict[t['Key']] = t['Value'] + if 'safe_to_retire' in tags_dict and tags_dict['safe_to_retire'].lower() == 'true': + logging.info(f"Instance with id {instance_id} is safe to retire.") + return True + else: + logging.info(f"Instance with id {instance_id} is not safe to retire.") + return False + else: + # No instance for the id in the SQS message; this can happen if something + # else has terminated the instance outside of this workflow + message = "Instance with id {id} is referenced in an SQS message, but does not exist.".\ format(id=instance_id) + raise MissingHostError(message) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-r', '--region', + help='The AWS region to use.', default='us-east-1') + parser.add_argument('-b', '--bin-directory', required=False, default=None, + help='The bin directory of the virtual env ' + 'from which to run the AWS cli (optional)') + parser.add_argument('-q', '--queue', required=True, + help="The SQS queue containing the lifecycle messages") + + parser.add_argument('--hook', required=True, + help="The lifecycle hook to act upon.") + + parser.add_argument('-d', "--dry-run", dest="dry_run", action="http://github.com/store_true", + help='Print the commands, but do not do anything') + parser.set_defaults(dry_run=False) + args = parser.parse_args() + + lh = LifecycleHandler(args.region, args.queue, args.hook, args.dry_run, args.bin_directory) + lh.process_lifecycle_messages() diff --git a/util/vpc-tools/create_stack.py b/util/vpc-tools/create_stack.py deleted file mode 100644 index 3d5c6ae5fbb..00000000000 --- a/util/vpc-tools/create_stack.py +++ /dev/null @@ -1,120 +0,0 @@ -import argparse -import boto -import yaml -from os.path import basename -from time import sleep -from pprint import pprint - - -FAILURE_STATES = [ - 'CREATE_FAILED', - 'ROLLBACK_IN_PROGRESS', - 'ROLLBACK_FAILED', - 'ROLLBACK_COMPLETE', - 'DELETE_IN_PROGRESS', - 'DELETE_FAILED', - 'DELETE_COMPLETE', - ] - -def upload_file(file_path, bucket_name, key_name): - """ - Upload a file to the given s3 bucket and return a template url.
- """ - conn = boto.connect_s3() - try: - bucket = conn.get_bucket(bucket_name) - except boto.exception.S3ResponseError as e: - conn.create_bucket(bucket_name) - bucket = conn.get_bucket(bucket_name, validate=False) - - key = boto.s3.key.Key(bucket) - key.key = key_name - key.set_contents_from_filename(file_path) - - key.set_acl('public-read') - url = "https://s3.amazonaws.com/%7B%7D/%7B%7D".format(bucket.name, key.name) - print( "URL: {}".format(url)) - return url - -def create_stack(stack_name, template, region='us-east-1', blocking=True, - temp_bucket='edx-sandbox-devops', parameters=[], - update=False): - - cfn = boto.connect_cloudformation() - - # Upload the template to s3 - key_pattern = 'devops/cloudformation/auto/{}_{}' - key_name = key_pattern.format(stack_name, basename(template)) - template_url = upload_file(template, temp_bucket, key_name) - - # Reference the stack. - try: - if update: - stack_id = cfn.update_stack(stack_name, - template_url=template_url, - capabilities=['CAPABILITY_IAM'], - tags={'autostack':'true'}, - parameters=parameters) - else: - stack_id = cfn.create_stack(stack_name, - template_url=template_url, - capabilities=['CAPABILITY_IAM'], - tags={'autostack':'true'}, - parameters=parameters) - except Exception as e: - print(e.message) - raise e - - status = None - while blocking: - sleep(5) - stack_instance = cfn.describe_stacks(stack_id)[0] - status = stack_instance.stack_status - print(status) - if 'COMPLETE' in status: - break - - if status in FAILURE_STATES: - raise Exception('Creation Failed. Stack Status: {}, ID:{}'.format( - status, stack_id)) - - return stack_id - -def cfn_params_from(filename): - params_dict = yaml.safe_load(open(filename)) - return [ (key,value) for key,value in params_dict.items() ] - -if __name__ == '__main__': - description = 'Create a cloudformation stack from a template.' - parser = argparse.ArgumentParser(description=description) - - msg = 'Name for the cloudformation stack.' - parser.add_argument('-n', '--stackname', required=True, help=msg) - - msg = 'Pass this argument if we are updating an existing stack.' - parser.add_argument('-u', '--update', action='http://github.com/store_true') - - msg = 'Name of the bucket to use for temporarily uploading the \ - template.' - parser.add_argument('-b', '--bucketname', default="edx-sandbox-devops", - help=msg) - - msg = 'The path to the cloudformation template.' - parser.add_argument('-t', '--template', required=True, help=msg) - - msg = 'The AWS region to build this stack in.' 
- parser.add_argument('-r', '--region', default='us-east-1', help=msg) - - msg = 'YAML file containing stack build parameters' - parser.add_argument('-p', '--parameters', help=msg) - - args = parser.parse_args() - stack_name = args.stackname - template = args.template - region = args.region - bucket_name = args.bucketname - parameters = cfn_params_from(args.parameters) - update = args.update - - create_stack(stack_name, template, region, temp_bucket=bucket_name, parameters=parameters, update=update) - print('Stack({}) created.'.format(stack_name)) diff --git a/util/vpc-tools/requirements.txt b/util/vpc-tools/requirements.txt index 190d7af2c93..a966598293d 100644 --- a/util/vpc-tools/requirements.txt +++ b/util/vpc-tools/requirements.txt @@ -1,2 +1,20 @@ -boto -docopt \ No newline at end of file +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# make upgrade +# +boto==2.49.0 + # via -r requirements/vpc-tools.in +certifi==2023.11.17 + # via requests +charset-normalizer==3.3.2 + # via requests +docopt==0.6.2 + # via -r requirements/vpc-tools.in +idna==3.6 + # via requests +requests==2.31.0 + # via -r requirements/vpc-tools.in +urllib3==2.1.0 + # via requests diff --git a/util/vpc-tools/sanitize-db-prod_grader.sql b/util/vpc-tools/sanitize-db-prod_grader.sql new file mode 100644 index 00000000000..47a94c54699 --- /dev/null +++ b/util/vpc-tools/sanitize-db-prod_grader.sql @@ -0,0 +1,18 @@ +SET FOREIGN_KEY_CHECKS=0; + +/* + Grader has its own django core tables. +*/ + +UPDATE auth_user + set + email = concat('success+',cast(id AS CHAR),'@simulator.amazonses.com'), + username = concat('user-',cast(id AS CHAR)), + first_name = concat('user-',cast(id AS CHAR)), + last_name = concat('user-',cast(id AS CHAR)), + password = null, + last_login = null, + date_joined = null + where email not like ('%@edx.org'); + +SET FOREIGN_KEY_CHECKS=1; diff --git a/util/vpc-tools/tag-old-ebs.py b/util/vpc-tools/tag-old-ebs.py new file mode 100644 index 00000000000..5ed5b3c3683 --- /dev/null +++ b/util/vpc-tools/tag-old-ebs.py @@ -0,0 +1,207 @@ +""" + +For a given aws account, go through all un-attached volumes and tag them. 
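+ +At a high level the script attaches each available volume to the instance it runs on, mounts it, infers tags from files on the volume (e.g. /etc/hostname and /var/lib/cloud/instance), applies the tags, then unmounts and detaches the volume. A hypothetical invocation (mappings.yml is a made-up name for the --config file described below): + + python tag-old-ebs.py --config mappings.yml --noop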
+ +""" +import boto +import boto.utils +import argparse +import logging +import subprocess +import time +import os +from os.path import join, exists, isdir, islink, realpath, basename, dirname +import yaml +# needs to be pip installed +import netaddr + +LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s" +TIMEOUT = 300 + +log_level = logging.INFO + +def tags_for_hostname(hostname, mapping): + logging.debug(f"Hostname is {hostname}") + if not hostname.startswith('ip-'): + return {} + + octets = hostname.lstrip('ip-').split('-') + tags = {} + + # Update with env and deployment info + tags.update(mapping['CIDR_SECOND_OCTET'][octets[1]]) + + ip_addr = netaddr.IPAddress(".".join(octets)) + for key, value in mapping['CIDR_REST'].items(): + cidr = ".".join([ + mapping['CIDR_FIRST_OCTET'], + octets[1], + key]) + + cidrset = netaddr.IPSet([cidr]) + + if ip_addr in cidrset: + tags.update(value) + + return tags + +def potential_devices(root_device): + device_dir = dirname(root_device) + relevant_devices = lambda x: x.startswith(basename(root_device)) + + all_devices = os.listdir(device_dir) + all_devices = list(filter(relevant_devices, all_devices)) + + logging.info(f"Potential devices on {root_device}: {all_devices}") + if len(all_devices) > 1: + all_devices.remove(basename(root_device)) + + return [join(device_dir, x) for x in all_devices] + +def get_tags_for_disk(mountpoint): + tag_data = {} + # Look at some files on it to determine: + # - hostname + # - environment + # - deployment + # - cluster + # - instance-id + # - date created + hostname_file = join(mountpoint, "etc", "hostname") + edx_dir = join(mountpoint, 'edx', 'app') + if exists(hostname_file): + # This means this was a root volume. + with open(hostname_file) as f: + hostname = f.readline().strip() + tag_data['hostname'] = hostname + + if exists(edx_dir) and isdir(edx_dir): + # This is an ansible related ami, we'll try to map + # the hostname to a knows deployment and cluster. + cluster_tags = tags_for_hostname(hostname, mappings) + tag_data.update(cluster_tags) + else: + # Not an ansible created root volume. + tag_data['cluster'] = 'unknown' + else: + # Not a root volume + tag_data['cluster'] = "unknown" + + instance_file = join(mountpoint, "var", "lib", "cloud", "instance") + if exists(instance_file) and islink(instance_file): + resolved_path = realpath(instance_file) + old_instance_id = basename(resolved_path) + tag_data['instance-id'] = old_instance_id + + return tag_data + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Tag unattached ebs volumes.") + parser.add_argument("--profile", '-p', + help="AWS Profile to use with boto.") + parser.add_argument("--noop", "-n", action="http://github.com/store_true", + help="Don't actually tag anything.") + parser.add_argument("--verbose", "-v", action="http://github.com/store_true", + help="More verbose output.") + parser.add_argument("--device", "-d", default="/dev/xvdf", + help="The /dev/??? where the volume should be mounted.") + parser.add_argument("--mountpoint", "-m", default="/mnt", + help="Location to mount the new device.") + parser.add_argument("--config", "-c", required=True, + help="Configuration to map hostnames to tags.") + # The config should specify what tags to associate with the second + # and this octet of the hostname which should be the ip address. 
+ # example: + + args = parser.parse_args() + + mappings = yaml.safe_load(open(args.config)) + + # Setup Logging + if args.verbose: + log_level = logging.DEBUG + + logging.basicConfig(format=LOG_FORMAT, level=log_level) + + # setup boto + ec2 = boto.connect_ec2(profile_name=args.profile) + + # get mounting args + id_info = boto.utils.get_instance_identity()['document'] + instance_id = id_info['instanceId'] + az = id_info['availabilityZone'] + root_device = args.device + mountpoint = args.mountpoint + + # Find all unattached volumes + filters = { "status": "available", "availability-zone": az } + potential_volumes = ec2.get_all_volumes(filters=filters) + logging.debug(f"Found {len(potential_volumes)} unattached volumes in {az}") + + for vol in potential_volumes: + if "cluster" in vol.tags: + continue + # Attach volume to the instance running this process + logging.debug("Trying to attach {} to {} at {}".format( + vol.id, instance_id, root_device)) + + try: + ec2.attach_volume(vol.id, instance_id, root_device) + # Wait for the volume to finish attaching. + waiting_msg = "Waiting for {} to be available at {}" + timeout = TIMEOUT + while not exists(root_device): + time.sleep(2) + logging.debug(waiting_msg.format(vol.id, root_device)) + timeout -= 2 + if timeout <= 0: + logging.critical(f"Timed out while attaching {vol.id}.") + exit(1) + + + # Because a volume might have multiple mount points + devices_on_volume = potential_devices(root_device) + if len(devices_on_volume) != 1: + vol.add_tag("devices_on_volume", str(devices_on_volume)) + # Don't tag in this case because the different devices + # may have conflicting tags. + logging.info(f"Skipping {vol.id} because it has multiple mountpoints.") + logging.info(f"{vol.id} has mountpoints {str(devices_on_volume)}") + else: + device = devices_on_volume[0] + try: + # Mount the volume + subprocess.check_call(["sudo", "mount", device, mountpoint]) + + # Learn all tags we can know from content on disk. + tag_data = get_tags_for_disk(mountpoint) + tag_data['created'] = vol.create_time + + # If they are found tag the instance with them + if args.noop: + logging.info(f"Would have tagged {vol.id} with: \n{str(tag_data)}") + else: + logging.info(f"Tagging {vol.id} with: \n{str(tag_data)}") + vol.add_tags(tag_data) + finally: + # Un-mount the volume + subprocess.check_call(['sudo', 'umount', mountpoint]) + finally: + # Need this to be a function so we always re-check the API for status. + is_attached = lambda vol_id: ec2.get_all_volumes(vol_id)[0].status != "available" + timeout = TIMEOUT + while exists(root_device) or is_attached(vol.id): + if is_attached(vol.id): + try: + # detach the volume + ec2.detach_volume(vol.id) + except boto.exception.EC2ResponseError as e: + logging.warning("Failed to detach volume. Will try again in a bit.") + time.sleep(2) + timeout -= 2 + if timeout <= 0: + logging.critical(f"Timed out while detaching {vol.id}.") + exit(1) + logging.debug(f"Waiting for {vol.id} to be detached.") + diff --git a/util/vpc-tools/vpc-tools.py b/util/vpc-tools/vpc-tools.py deleted file mode 100644 index 8c5b2d883e4..00000000000 --- a/util/vpc-tools/vpc-tools.py +++ /dev/null @@ -1,139 +0,0 @@ -"""VPC Tools. - -Usage: - vpc-tools.py ssh-config (vpc | stack-name ) identity-file user [(config-file )] [(strict-host-check )] - vpc-tools.py (-h --help) - vpc-tools.py (-v --version) - -Options: - -h --help Show this screen. - -v --version Show version. 
- -""" -import boto -from docopt import docopt -from vpcutil import vpc_for_stack_name -from vpcutil import stack_name_for_vpc -from collections import defaultdict - - -VERSION="vpc tools 0.1" -DEFAULT_USER="ubuntu" -DEFAULT_HOST_CHECK="ask" - -JUMPBOX_CONFIG = """ - Host {jump_box} - HostName {ip} - IdentityFile {identity_file} - ForwardAgent yes - User {user} - StrictHostKeyChecking {strict_host_check} - """ - -HOST_CONFIG = """ - # Instance ID: {instance_id} - Host {name} - ProxyCommand ssh {config_file} -W %h:%p {jump_box} - HostName {ip} - IdentityFile {identity_file} - ForwardAgent yes - User {user} - StrictHostKeyChecking {strict_host_check} - """ - - -def dispatch(args): - - if args.get("ssh-config"): - _ssh_config(args) - -def _ssh_config(args): - if args.get("vpc"): - vpc_id = args.get("") - stack_name = stack_name_for_vpc(vpc_id) - elif args.get("stack-name"): - stack_name = args.get("") - vpc_id = vpc_for_stack_name(stack_name) - else: - raise Exception("No vpc_id or stack_name provided.") - - vpc = boto.connect_vpc() - - identity_file = args.get("") - user = args.get("") - config_file = args.get("") - strict_host_check = args.get("") - - if not user: - user = DEFAULT_USER - - if not strict_host_check: - strict_host_check = DEFAULT_HOST_CHECK - - if config_file: - config_file = "-F {}".format(config_file) - else: - config_file = "" - - jump_box = "{stack_name}-jumpbox".format(stack_name=stack_name) - friendly = "{stack_name}-{logical_id}-{instance_number}" - id_type_counter = defaultdict(int) - - reservations = vpc.get_all_instances(filters={'vpc-id' : vpc_id}) - - for reservation in reservations: - for instance in reservation.instances: - - if 'play' in instance.tags: - logical_id = instance.tags['play'] - elif 'role' in instance.tags: - # deprecated, use "play" instead - logical_id = instance.tags['role'] - elif 'group' in instance.tags: - logical_id = instance.tags['group'] - elif 'aws:cloudformation:logical-id' in instance.tags: - logical_id = instance.tags['aws:cloudformation:logical-id'] - else: - continue - instance_number = id_type_counter[logical_id] - id_type_counter[logical_id] += 1 - - if logical_id == "BastionHost" or logical_id == 'bastion': - - print JUMPBOX_CONFIG.format( - jump_box=jump_box, - ip=instance.ip_address, - user=user, - identity_file=identity_file, - strict_host_check=strict_host_check) - - # Print host config even for the bastion box because that is how - # ansible accesses it. 
- print HOST_CONFIG.format( - name=instance.private_ip_address, - jump_box=jump_box, - ip=instance.private_ip_address, - user=user, - identity_file=identity_file, - config_file=config_file, - strict_host_check=strict_host_check, - instance_id=instance.id) - - #duplicating for convenience with ansible - name = friendly.format(stack_name=stack_name, - logical_id=logical_id, - instance_number=instance_number) - - print HOST_CONFIG.format( - name=name, - jump_box=jump_box, - ip=instance.private_ip_address, - user=user, - identity_file=identity_file, - config_file=config_file, - strict_host_check=strict_host_check, - instance_id=instance.id) - -if __name__ == '__main__': - args = docopt(__doc__, version=VERSION) - dispatch(args) diff --git a/util/vpc-tools/vpc_dns.py b/util/vpc-tools/vpc_dns.py index 47533acaba1..29f7b238642 100644 --- a/util/vpc-tools/vpc_dns.py +++ b/util/vpc-tools/vpc_dns.py @@ -27,29 +27,46 @@ import datetime from vpcutil import vpc_for_stack_name import xml.dom.minidom -import re +import sys -r53 = boto.connect_route53() +# These are ELBs that we do not want to create dns entries +# for because the instances attached to them are also in +# other ELBs and we want the env-deploy-play tuple which makes +# up the dns name to be unique +ELB_BAN_LIST = [ + 'Apros', +] + +# If the ELB name has one of these keys in its name, the mapped +# play will be used for the DNS CNAME tuple. This is used for +# commoncluster. + +ELB_PLAY_MAPPINGS = { + 'RabbitMQ': 'rabbitmq', + 'Xqueue': 'xqueue', + 'Elastic': 'elasticsearch', +} -extra_play_dns = {"edxapp":["courses","studio"]} class DNSRecord(): def __init__(self, zone, record_name, record_type, - record_ttl, record_values): + record_ttl, record_values): self.zone = zone self.record_name = record_name self.record_type = record_type self.record_ttl = record_ttl self.record_values = record_values + def add_or_update_record(dns_records): """ Creates or updates a DNS record in a hosted route53 zone """ change_set = boto.route53.record.ResourceRecordSets() + record_names = set() for record in dns_records: @@ -60,9 +77,16 @@ def add_or_update_record(dns_records): record_values: {} """.format(record.record_name, record.record_type, record.record_ttl, record.record_values) - if args.noop: - print("Would have updated DNS record:\n{}".format(status_msg)) + print(f"Would have updated DNS record:\n{status_msg}") + else: + print(f"Updating DNS record:\n{status_msg}") + + if record.record_name in record_names: + print("Unable to create record for {} with value {} because one already exists!".format( + record.record_name, record.record_values)) + sys.exit(1) + record_names.add(record.record_name) zone_id = record.zone.Id.replace("/hostedzone/", "") @@ -71,10 +95,17 @@ def add_or_update_record(dns_records): old_records = {r.name[:-1]: r for r in records} # If the record name already points to something. - # Delete the existing connection. - if record.record_name in old_records.keys(): + # Delete the existing connection. If the record has + # the same type and name, skip it. + if record.record_name in list(old_records.keys()): + if record.record_name + "."
== old_records[record.record_name].name and \ + record.record_type == old_records[record.record_name].type: + print("Record for {} already exists and is identical, skipping.\n".format( + record.record_name)) + continue + if args.force: - print("Deleting record:\n{}".format(status_msg)) + print(f"Deleting record:\n{status_msg}") change = change_set.add_change( 'DELETE', record.record_name, @@ -99,11 +130,12 @@ if args.noop: print("Would have submitted the following change set:\n") - xml_doc = xml.dom.minidom.parseString(change_set.to_xml()) - print xml_doc.toprettyxml() else: + print("Submitting the following change set:\n") + xml_doc = xml.dom.minidom.parseString(change_set.to_xml()) + print(xml_doc.toprettyxml(newl='')) # newl='' to remove extra newlines + if not args.noop: r53.change_rrsets(zone_id, change_set.to_xml()) - print("Updated DNS record:\n{}".format(status_msg)) def get_or_create_hosted_zone(zone_name): @@ -128,48 +160,51 @@ return zone if not zone: - print("zone {} does not exist, creating".format(zone_name)) + print(f"zone {zone_name} does not exist, creating") ts = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%SUTC') zone = r53.create_hosted_zone( - zone_name, comment="Created by vpc_dns script - {}".format(ts)) + zone_name, comment=f"Created by vpc_dns script - {ts}") if parent_zone: - print("Updating parent zone {}".format(parent_zone_name)) + print(f"Updating parent zone {parent_zone_name}") dns_records = set() - dns_records.add(DNSRecord(parent_zone,zone_name,'NS',900,zone.NameServers)) + dns_records.add(DNSRecord(parent_zone, zone_name, 'NS', 900, zone.NameServers)) add_or_update_record(dns_records) return zone + def get_security_group_dns(group_name): # stage-edx-RabbitMQELBSecurityGroup-YB8ZKIZYN1EN - environment,deployment,sec_group,salt = group_name.split('-') - play = sec_group.replace("ELBSecurityGroup","").lower() + environment, deployment, sec_group, salt = group_name.split('-') + play = sec_group.replace("ELBSecurityGroup", "").lower() return environment, deployment, play -def get_dns_from_instances(elb): - - ec2_con = boto.connect_ec2() +def get_dns_from_instances(elb): for inst in elb.instances: - instance = ec2_con.get_all_instances( + try: + instance = ec2_con.get_all_instances( instance_ids=[inst.id])[0].instances[0] + except IndexError: + print(f"instance {inst} attached to elb {elb} could not be found") + sys.exit(1) try: env_tag = instance.tags['environment'] + deployment_tag = instance.tags['deployment'] if 'play' in instance.tags: play_tag = instance.tags['play'] else: # deprecated, for backwards compatibility play_tag = instance.tags['role'] - break # only need the first instance for tag info except KeyError: print("Instance {}, attached to elb {} does not " - "have tags for environment and play".format(elb, inst)) + "have a tag for environment, play or deployment".format(inst, elb)) + sys.exit(1) - return env_tag, play_tag + return env_tag, deployment_tag, play_tag def update_elb_rds_dns(zone): @@ -182,10 +217,7 @@ def update_elb_rds_dns(zone): dns_records = set() - elb_con = boto.connect_elb() - rds_con = boto.connect_rds() - - vpc_id = vpc_for_stack_name(args.stack_name) + vpc_id = vpc_for_stack_name(args.stack_name, args.aws_id, args.aws_secret) if not zone and args.noop: # use a placeholder for zone name @@ -196,55 +228,84 @@ stack_elbs = [elb for elb in elb_con.get_all_load_balancers() if elb.vpc_id == vpc_id] - for elb in stack_elbs: +
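+ # For each ELB: derive (environment, deployment, play) from an attached + # instance's tags, optionally override the play via ELB_PLAY_MAPPINGS, + # skip ELBs on the ban list, then emit a CNAME to the ELB's dns_name.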
env_tag, deployment_tag, play_tag = get_dns_from_instances(elb) - if "RabbitMQ" in elb.source_security_group.name or "ElasticSearch" in elb.source_security_group.name: - env_tag,deployment,play_tag = get_security_group_dns(elb.source_security_group.name) - fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name) - dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name])) - else: - env_tag,play_tag = get_dns_from_instances(elb) - fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name) - dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name])) + # Override the play tag if a substring of the elb name + # is in ELB_PLAY_MAPPINGS + + for key in ELB_PLAY_MAPPINGS.keys(): + if key in elb.name: + play_tag = ELB_PLAY_MAPPINGS[key] + break + fqdn = f"{env_tag}-{deployment_tag}-{play_tag}.{zone_name}" - if extra_play_dns.has_key(play_tag): - for name in extra_play_dns.get(play_tag): - fqdn = "{}-{}.{}".format(env_tag, name, zone_name) - dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name])) + # Skip over ELBs if a substring of the ELB name is in + # the ELB_BAN_LIST + if any(name in elb.name for name in ELB_BAN_LIST): + print(f"Skipping {elb.name} because it is on the ELB ban list") + continue + + dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [elb.dns_name])) stack_rdss = [rds for rds in rds_con.get_all_dbinstances() if hasattr(rds.subnet_group, 'vpc_id') and rds.subnet_group.vpc_id == vpc_id] # TODO the current version of the RDS API doesn't support - # looking up RDS instance tags. Hence, we are using the - # env_tag that was set via the loop over instances above. + # looking up RDS instance tags. Hence, we are using the + # env_tag and deployment_tag that were set via the loop over instances above. + + rds_endpoints = set() for rds in stack_rdss: - fqdn = "{}-{}.{}".format(env_tag,'rds', zone_name) - dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[stack_rdss[0].endpoint[0]])) + endpoint = rds.endpoint[0] + fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, 'rds', zone_name) + # filter out rds instances with the same endpoints (multi-AZ) + if endpoint not in rds_endpoints: + dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [endpoint])) + rds_endpoints.add(endpoint) add_or_update_record(dns_records) if __name__ == "__main__": - description = "Give a cloudformation stack name, for an edx stack, setup \ - DNS names for the ELBs in the stack."
+ description = """ + + Given a cloudformation stack name for an edx stack, set up + DNS names for the ELBs in the stack. + DNS entries will be created with the following format: + + <environment>-<deployment>-<play>.edx.org + + """ parser = argparse.ArgumentParser(description=description) parser.add_argument('-s', '--stack-name', required=True, help="The name of the cloudformation stack.") parser.add_argument('-n', '--noop', help="Don't make any changes.", action="http://github.com/store_true", default=False) - parser.add_argument('-z', '--zone-name', default="vpc.edx.org", + parser.add_argument('-z', '--zone-name', default="edx.org", help="The name of the zone under which to " "create the dns entries.") parser.add_argument('-f', '--force', help="Force reuse of an existing name in a zone", - action="http://github.com/store_true",default=False) + action="http://github.com/store_true", default=False) + parser.add_argument('--aws-id', default=None, + help="read only aws key for fetching instance information for " + "the account you wish to add entries for") + parser.add_argument('--aws-secret', default=None, + help="read only aws secret for fetching instance information for " + "the account you wish to add entries for") args = parser.parse_args() + # Connect to ec2 using the provided credentials on the command line + ec2_con = boto.connect_ec2(args.aws_id, args.aws_secret) + elb_con = boto.connect_elb(args.aws_id, args.aws_secret) + rds_con = boto.connect_rds(args.aws_id, args.aws_secret) + + # Connect to route53 using the user's .boto file + r53 = boto.connect_route53() + zone = get_or_create_hosted_zone(args.zone_name) update_elb_rds_dns(zone) - diff --git a/util/vpc-tools/vpcutil.py b/util/vpc-tools/vpcutil.py index 57c42ccd3c8..b2ab9ae51a3 100644 --- a/util/vpc-tools/vpcutil.py +++ b/util/vpc-tools/vpcutil.py @@ -1,21 +1,38 @@ import boto +import boto.rds2 +import boto.rds -def vpc_for_stack_name(stack_name): - cfn = boto.connect_cloudformation() +CFN_TAG_KEY = 'aws:cloudformation:stack-name' + +def vpc_for_stack_name(stack_name, aws_id=None, aws_secret=None): + cfn = boto.connect_cloudformation(aws_id, aws_secret) resources = cfn.list_stack_resources(stack_name) for resource in resources: - if resource.resource_type == 'AWS::EC2::VPC': - return resource.physical_resource_id + if resource.resource_type == 'AWS::EC2::VPC': + return resource.physical_resource_id + -def stack_name_for_vpc(vpc_name): - cfn_tag_key = 'aws:cloudformation:stack-name' - vpc = boto.connect_vpc() +def stack_name_for_vpc(vpc_name, aws_id, aws_secret): + vpc = boto.connect_vpc(aws_id, aws_secret) resource = vpc.get_all_vpcs(vpc_ids=[vpc_name])[0] - if cfn_tag_key in resource.tags: - return resource.tags[cfn_tag_key] + if CFN_TAG_KEY in resource.tags: + return resource.tags[CFN_TAG_KEY] else: - msg = "VPC({}) is not part of a cloudformation stack.".format(vpc_name) + msg = f"VPC({vpc_name}) is not part of a cloudformation stack."
raise Exception(msg) - - + +def rds_subnet_group_name_for_stack_name(stack_name, region='us-east-1', aws_id=None, aws_secret=None): + # Helper function to look up a subnet group name by stack name + rds = boto.rds2.connect_to_region(region, aws_access_key_id=aws_id, aws_secret_access_key=aws_secret) + vpc = vpc_for_stack_name(stack_name, aws_id, aws_secret) + for group in rds.describe_db_subnet_groups()['DescribeDBSubnetGroupsResponse']['DescribeDBSubnetGroupsResult']['DBSubnetGroups']: + if group['VpcId'] == vpc: + return group['DBSubnetGroupName'] + return None + + +def all_stack_names(region='us-east-1', aws_id=None, aws_secret=None): + vpc_conn = boto.connect_vpc(aws_id, aws_secret) + return [vpc.tags[CFN_TAG_KEY] for vpc in vpc_conn.get_all_vpcs() + if CFN_TAG_KEY in list(vpc.tags.keys())] diff --git a/vagrant/README.rst b/vagrant/README.rst index 9b84a4efde0..b5e50f637ec 100644 --- a/vagrant/README.rst +++ b/vagrant/README.rst @@ -1,14 +1,21 @@ Vagrant ======= -Vagrant instances for local development and testing. +For testing Ansible playbooks and roles, there are two directories under the ``base`` directory: -- Vagrant stacks in ``base`` create new base boxes from scratch. -- Vagrant stacks in ``release`` download a base box with most requirements already installed. The instances then update themselves with the latest versions of the application code. +- ``test_playbook`` is used for testing the playbooks in the Ansible configuration scripts. +- ``test_role`` is used for testing the roles in the Ansible configuration scripts. -If you are a developer or designer, you should use the ``release`` stacks. +To test an Ansible playbook using Vagrant: -There are two versions of the stack: +- Create/modify a playbook under ``/playbooks`` (e.g. "foo.yml") +- Export its name as the value of the environment variable ``VAGRANT_ANSIBLE_PLAYBOOK``, like this: + - ``export VAGRANT_ANSIBLE_PLAYBOOK=foo`` +- Execute ``vagrant up`` from within the ``test_playbook`` directory. -- ``fullstack`` is a production-like configuration running all the services on a single server. https://github.com/edx/configuration/wiki/edX-Production-Stack -- ``devstack`` is designed for local development. Although it uses the same system requirements as in production, it simplifies certain settings to make development more convenient. https://github.com/edx/configuration/wiki/edX-Developer-Stack +To test an Ansible role using Vagrant: + +- Create/modify a role under ``/playbooks/roles`` (e.g. "bar-role") +- Export its name as the value of the environment variable ``VAGRANT_ANSIBLE_ROLE``, like this: + - ``export VAGRANT_ANSIBLE_ROLE=bar-role`` +- Execute ``vagrant up`` from within the ``test_role`` directory. diff --git a/vagrant/base/analytics_jenkins/Vagrantfile b/vagrant/base/analytics_jenkins/Vagrantfile new file mode 100644 index 00000000000..b7619fd75d8 --- /dev/null +++ b/vagrant/base/analytics_jenkins/Vagrantfile @@ -0,0 +1,35 @@ +# -*- mode: ruby -*- + +VAGRANTFILE_API_VERSION = '2' +MEMORY = 2048 +PRIVATE_IP = ENV['VAGRANT_PRIVATE_IP'] || '192.168.33.15' + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = 'ubuntu/trusty64' + + config.vm.network 'private_network', ip: PRIVATE_IP + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provider 'virtualbox' do |vb| + vb.memory = MEMORY + end + + unless ENV['VAGRANT_NO_PORTS'] + config.vm.network :forwarded_port, guest: 8080, host: 8080 # Jenkins + end + + unless ENV['VAGRANT_JENKINS_LOCAL_VARS_FILE'] + raise 'Please set the VAGRANT_JENKINS_LOCAL_VARS_FILE environment variable. '\
+ 'That variable should point to a file containing variable '\ + 'overrides for the analytics_jenkins role. For required overrides '\ + 'see README.rst in the analytics_jenkins role folder.' + end + + config.vm.provision :ansible do |ansible| + ansible.playbook = '../../../playbooks/analytics-jenkins.yml' + ansible.verbose = 'vvvv' + + ansible.extra_vars = ENV['VAGRANT_JENKINS_LOCAL_VARS_FILE'] + end + +end diff --git a/vagrant/base/analyticstack/Vagrantfile b/vagrant/base/analyticstack/Vagrantfile new file mode 100644 index 00000000000..def342a330b --- /dev/null +++ b/vagrant/base/analyticstack/Vagrantfile @@ -0,0 +1,95 @@ +Vagrant.require_version ">= 1.8.7" +unless Vagrant.has_plugin?("vagrant-vbguest") + raise "Please install the vagrant-vbguest plugin by running `vagrant plugin install vagrant-vbguest`" +end + +VAGRANTFILE_API_VERSION = "2" + +# Analyticstack requires more memory than Devstack. +MEMORY = 6144 +CPU_COUNT = 2 + +vm_guest_ip = "192.168.33.10" +if ENV["VAGRANT_GUEST_IP"] + vm_guest_ip = ENV["VAGRANT_GUEST_IP"] +end + +# These are versioning variables in the roles. Each can be overridden, first +# with OPENEDX_RELEASE, and then with a specific environment variable of the +# same name but upper-cased. +VERSION_VARS = [ + 'EDX_PLATFORM_VERSION', + 'CONFIGURATION_VERSION', + 'FORUM_VERSION', + 'XQUEUE_VERSION', + 'DEMO_VERSION', + 'ECOMMERCE_VERSION', + 'ECOMMERCE_WORKER_VERSION', + 'ANALYTICS_API_VERSION', + 'INSIGHTS_VERSION', +] + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + + # Creates an analyticstack from a base Ubuntu 16.04 image for virtualbox + config.vm.box = "bento/ubuntu-16.04" + + config.vm.network :private_network, ip: vm_guest_ip + + # If you want to run the box but don't need network ports, set VAGRANT_NO_PORTS=1. + # This is useful if you want to run more than one box at once. + if not ENV['VAGRANT_NO_PORTS'] + config.vm.network :forwarded_port, guest: 8000, host: 8000 # LMS + config.vm.network :forwarded_port, guest: 8001, host: 8001 # Studio + config.vm.network :forwarded_port, guest: 8002, host: 8002 # Ecommerce + config.vm.network :forwarded_port, guest: 8120, host: 8120 # edX Notes Service + config.vm.network :forwarded_port, guest: 8765, host: 8765 + config.vm.network :forwarded_port, guest: 9200, host: 9200 + config.vm.network :forwarded_port, guest: 18080, host: 18080 + config.vm.network :forwarded_port, guest: 8100, host: 8100 # Analytics Data API + config.vm.network :forwarded_port, guest: 8110, host: 8110 # Insights + config.vm.network :forwarded_port, guest: 50070, host: 50070 # HDFS Admin UI + config.vm.network :forwarded_port, guest: 8088, host: 8088 # Hadoop Resource Manager + end + + config.ssh.insert_key = true + + # Enable X11 forwarding so we can interact with GUI applications + if ENV['VAGRANT_X11'] + config.ssh.forward_x11 = true + end + + config.vm.provider :virtualbox do |vb| + vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s] + vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s] + end + + + # Make LC_ALL default to en_US.UTF-8 instead of en_US. + # See: https://github.com/mitchellh/vagrant/issues/1188 + config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale' + + # Get ready for ansible on this box.
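+ # ansible-bootstrap.sh presumably installs Ansible and its Python + # dependencies on the guest so the ansible provisioner below can run.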
+ config.vm.provision "shell", path: '../../../util/install/ansible-bootstrap.sh' + + # Use vagrant-vbguest plugin to make sure Guest Additions are in sync + config.vbguest.auto_reboot = true + config.vbguest.auto_update = true + + config.vm.provision :ansible do |ansible| + ansible.playbook = "../../../playbooks/vagrant-analytics.yml" + ansible.verbose = "vvvv" + + ansible.extra_vars = {} + VERSION_VARS.each do |var| + if ENV['OPENEDX_RELEASE'] + ansible.extra_vars[var] = ENV['OPENEDX_RELEASE'] + end + env_var = var.upcase + if ENV[env_var] + ansible.extra_vars[var] = ENV[env_var] + end + end + + end +end diff --git a/vagrant/base/devstack/ansible.cfg b/vagrant/base/analyticstack/ansible.cfg similarity index 100% rename from vagrant/base/devstack/ansible.cfg rename to vagrant/base/analyticstack/ansible.cfg diff --git a/vagrant/base/cluster/Vagrantfile b/vagrant/base/cluster/Vagrantfile new file mode 100644 index 00000000000..c6371da521f --- /dev/null +++ b/vagrant/base/cluster/Vagrantfile @@ -0,0 +1,85 @@ +# -*- mode: ruby -*- + +# vi: set ft=ruby : + +VAGRANTFILE_API_VERSION = "2" + +Vagrant.require_version ">= 1.5.0" + +$script = <